1 /*
2  * Copyright (c) 2018-2019 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27
28 /*
29  * File : ecore_rdma.c
30  */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include "bcm_osal.h"
35 #include "ecore.h"
36 #include "ecore_status.h"
37 #include "ecore_sp_commands.h"
38 #include "ecore_cxt.h"
39 #include "ecore_rdma.h"
40 #include "reg_addr.h"
41 #include "ecore_rt_defs.h"
42 #include "ecore_init_ops.h"
43 #include "ecore_hw.h"
44 #include "ecore_mcp.h"
45 #include "ecore_init_fw_funcs.h"
46 #include "ecore_int.h"
47 #include "pcics_reg_driver.h"
48 #include "ecore_iro.h"
49 #include "ecore_gtt_reg_addr.h"
50 #include "ecore_hsi_iwarp.h"
51 #include "ecore_ll2.h"
52 #include "ecore_ooo.h"
53 #ifndef LINUX_REMOVE
54 #include "ecore_tcp_ip.h"
55 #endif
56
57 enum _ecore_status_t ecore_rdma_bmap_alloc(struct ecore_hwfn *p_hwfn,
58                                            struct ecore_bmap *bmap,
59                                            u32              max_count,
60                                            char              *name)
61 {
62         u32 size_in_bytes;
63
64         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "max_count = %08x\n", max_count);
65
66         bmap->max_count = max_count;
67
68         if (!max_count) {
69                 bmap->bitmap = OSAL_NULL;
70                 return ECORE_SUCCESS;
71         }
72
73         size_in_bytes = sizeof(unsigned long) *
74                 DIV_ROUND_UP(max_count, (sizeof(unsigned long) * 8));
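        /* Worked example (assuming a 64-bit platform, i.e. 8-byte longs):
         * max_count = 1024 -> DIV_ROUND_UP(1024, 64) = 16 longs -> 128 bytes.
         */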
75
76         bmap->bitmap = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size_in_bytes);
77         if (!bmap->bitmap)
78         {
79                 DP_NOTICE(p_hwfn, false,
80                           "ecore bmap alloc failed: cannot allocate memory (bitmap). rc = %d\n",
81                           ECORE_NOMEM);
82                 return ECORE_NOMEM;
83         }
84
85         OSAL_SNPRINTF(bmap->name, QEDR_MAX_BMAP_NAME, "%s", name);
86
87         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ECORE_SUCCESS\n");
88         return ECORE_SUCCESS;
89 }
90
91 enum _ecore_status_t ecore_rdma_bmap_alloc_id(struct ecore_hwfn *p_hwfn,
92                                               struct ecore_bmap *bmap,
93                                               u32              *id_num)
94 {
95         *id_num = OSAL_FIND_FIRST_ZERO_BIT(bmap->bitmap, bmap->max_count);
96         if (*id_num >= bmap->max_count)
97                 return ECORE_INVAL;
98
99         OSAL_SET_BIT(*id_num, bmap->bitmap);
100
101         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: allocated id %d\n",
102                    bmap->name, *id_num);
103
104         return ECORE_SUCCESS;
105 }
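
/* Illustrative usage sketch (an assumption about typical callers, mirroring
 * ecore_rdma_alloc_pd() further below): ID allocation is serialized with the
 * rdma_info lock:
 *
 *      u32 id;
 *      OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
 *      rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, &id);
 *      OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
 */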
106
107 void ecore_bmap_set_id(struct ecore_hwfn *p_hwfn,
108                        struct ecore_bmap *bmap,
109                        u32              id_num)
110 {
111         if (id_num >= bmap->max_count) {
112                 DP_NOTICE(p_hwfn, true,
113                           "%s bitmap: cannot set id %d max is %d\n",
114                           bmap->name, id_num, bmap->max_count);
115
116                 return;
117         }
118
119         OSAL_SET_BIT(id_num, bmap->bitmap);
120
121         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: set id %d\n",
122                    bmap->name, id_num);
123 }
124
125 void ecore_bmap_release_id(struct ecore_hwfn *p_hwfn,
126                            struct ecore_bmap *bmap,
127                            u32              id_num)
128 {
129         bool b_acquired;
130
131         if (id_num >= bmap->max_count)
132                 return;
133
134         b_acquired = OSAL_TEST_AND_CLEAR_BIT(id_num, bmap->bitmap);
135         if (!b_acquired)
136         {
137                 DP_NOTICE(p_hwfn, false, "%s bitmap: id %d already released\n",
138                           bmap->name, id_num);
139                 return;
140         }
141
142         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: released id %d\n",
143                    bmap->name, id_num);
144 }
145
146 int ecore_bmap_test_id(struct ecore_hwfn *p_hwfn,
147                        struct ecore_bmap *bmap,
148                        u32                id_num)
149 {
150         if (id_num >= bmap->max_count) {
151                 DP_NOTICE(p_hwfn, true,
152                           "%s bitmap: id %d too high. max is %d\n",
153                           bmap->name, id_num, bmap->max_count);
154                 return -1;
155         }
156
157         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "%s bitmap: tested id %d\n",
158                    bmap->name, id_num);
159
160         return OSAL_TEST_BIT(id_num, bmap->bitmap);
161 }
162
163 static bool ecore_bmap_is_empty(struct ecore_bmap *bmap)
164 {
165         return (bmap->max_count ==
166                 OSAL_FIND_FIRST_BIT(bmap->bitmap, bmap->max_count));
167 }
168
169 #ifndef LINUX_REMOVE
170 u32 ecore_rdma_get_sb_id(struct ecore_hwfn *p_hwfn, u32 rel_sb_id)
171 {
172         /* The first sb id for RoCE comes after all the L2 sbs */
173         return FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE) + rel_sb_id;
174 }
175
176 u32 ecore_rdma_query_cau_timer_res(void)
177 {
178         return ECORE_CAU_DEF_RX_TIMER_RES;
179 }
180 #endif
181
182 enum _ecore_status_t ecore_rdma_info_alloc(struct ecore_hwfn    *p_hwfn)
183 {
184         struct ecore_rdma_info *p_rdma_info;
185
186         p_rdma_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info));
187         if (!p_rdma_info) {
188                 DP_NOTICE(p_hwfn, false,
189                           "ecore rdma alloc failed: cannot allocate memory (rdma info).\n");
190                 return ECORE_NOMEM;
191         }
192         p_hwfn->p_rdma_info = p_rdma_info;
193
194 #ifdef CONFIG_ECORE_LOCK_ALLOC
195         if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_rdma_info->lock)) {
196                 ecore_rdma_info_free(p_hwfn);
197                 return ECORE_NOMEM;
198         }
199 #endif
200         OSAL_SPIN_LOCK_INIT(&p_rdma_info->lock);
201
202         return ECORE_SUCCESS;
203 }
204
205 void ecore_rdma_info_free(struct ecore_hwfn *p_hwfn)
206 {
207 #ifdef CONFIG_ECORE_LOCK_ALLOC
208         OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_rdma_info->lock);
209 #endif
210         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info);
211         p_hwfn->p_rdma_info = OSAL_NULL;
212 }
213
214 static enum _ecore_status_t ecore_rdma_inc_ref_cnt(struct ecore_hwfn *p_hwfn)
215 {
216         enum _ecore_status_t rc = ECORE_INVAL;
217
218         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
219         if (p_hwfn->p_rdma_info->active) {
220                 p_hwfn->p_rdma_info->ref_cnt++;
221                 rc = ECORE_SUCCESS;
222         } else {
223                 DP_INFO(p_hwfn, "Ref cnt requested for inactive rdma\n");
224         }
225         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
226         return rc;
227 }
228
229 static void ecore_rdma_dec_ref_cnt(struct ecore_hwfn *p_hwfn)
230 {
231         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
232         p_hwfn->p_rdma_info->ref_cnt--;
233         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
234 }
235
236 static void ecore_rdma_activate(struct ecore_hwfn *p_hwfn)
237 {
238         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
239         p_hwfn->p_rdma_info->active = true;
240         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
241 }
242
243 /* Part of deactivating rdma is letting all the relevant flows complete before
244  * we start shutting down; currently that is only query-stats, which can be
245  * called from MCP context.
246  */
247 /* The longest time it can take a rdma flow to complete */
248 #define ECORE_RDMA_MAX_FLOW_TIME (100)
249 static enum _ecore_status_t ecore_rdma_deactivate(struct ecore_hwfn *p_hwfn)
250 {
251         int wait_count;
252
253         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
254         p_hwfn->p_rdma_info->active = false;
255         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
256
257         /* We'll give each flow its time to complete... */
258         wait_count = p_hwfn->p_rdma_info->ref_cnt;
259
260         while (p_hwfn->p_rdma_info->ref_cnt) {
261                 OSAL_MSLEEP(ECORE_RDMA_MAX_FLOW_TIME);
262                 if (--wait_count == 0) {
263                         DP_NOTICE(p_hwfn, false,
264                                   "Timeout on refcnt=%d\n",
265                                   p_hwfn->p_rdma_info->ref_cnt);
266                         return ECORE_TIMEOUT;
267                 }
268         }
269         return ECORE_SUCCESS;
270 }
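
/* Illustrative sketch (assumed caller pattern; the real users, e.g. the MCP
 * stats query, live elsewhere in this file): a flow that must not race with
 * shutdown brackets its work with the ref count:
 *
 *      if (ecore_rdma_inc_ref_cnt(p_hwfn) == ECORE_SUCCESS) {
 *              ... do the rdma work ...
 *              ecore_rdma_dec_ref_cnt(p_hwfn);
 *      }
 */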
271
272 static enum _ecore_status_t ecore_rdma_alloc(struct ecore_hwfn *p_hwfn)
273 {
274         struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
275         u32 num_cons, num_tasks;
276         enum _ecore_status_t rc;
277
278         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocating RDMA\n");
279
280         if (!p_rdma_info)
281                 return ECORE_INVAL;
282
283         if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_IWARP)
284                 p_rdma_info->proto = PROTOCOLID_IWARP;
285         else
286                 p_rdma_info->proto = PROTOCOLID_ROCE;
287
288         num_cons = ecore_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
289                                                  OSAL_NULL);
290
291         if (IS_IWARP(p_hwfn))
292                 p_rdma_info->num_qps = num_cons;
293         else
294                 p_rdma_info->num_qps = num_cons / 2;
295
296         /* INTERNAL: RoCE & iWARP use the same taskid */
297         num_tasks = ecore_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
298
299         /* Each MR uses a single task */
300         p_rdma_info->num_mrs = num_tasks;
301
302         /* Queue zone lines are shared between RoCE and L2 in such a way that
303          * they can be used by each without obstructing the other.
304          */
305         p_rdma_info->queue_zone_base = (u16) RESC_START(p_hwfn, ECORE_L2_QUEUE);
306         p_rdma_info->max_queue_zones = (u16) RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
307
308         /* Allocate a struct with device params and fill it */
309         p_rdma_info->dev = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info->dev));
310         if (!p_rdma_info->dev)
311         {
312                 rc = ECORE_NOMEM;
313                 DP_NOTICE(p_hwfn, false,
314                           "ecore rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
315                           rc);
316                 return rc;
317         }
318
319         /* Allocate a struct with port params and fill it */
320         p_rdma_info->port = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_rdma_info->port));
321         if (!p_rdma_info->port)
322         {
323                 DP_NOTICE(p_hwfn, false,
324                           "ecore rdma alloc failed: cannot allocate memory (rdma info port)\n");
325                 return ECORE_NOMEM;
326         }
327
328         /* Allocate bitmap for PDs */
329         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
330                                    "PD");
331         if (rc != ECORE_SUCCESS)
332         {
333                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
334                            "Failed to allocate pd_map, rc = %d\n",
335                            rc);
336                 return rc;
337         }
338
339         /* Allocate bit map for XRC Domains */
340         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
341                                    ECORE_RDMA_MAX_XRCDS, "XRCD");
342         if (rc != ECORE_SUCCESS)
343         {
344                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
345                            "Failed to allocate xrcd_map, rc = %d\n",
346                            rc);
347                 return rc;
348         }
349
350         /* Allocate DPI bitmap */
351         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
352                                    p_hwfn->dpi_count, "DPI");
353         if (rc != ECORE_SUCCESS)
354         {
355                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
356                            "Failed to allocate DPI bitmap, rc = %d\n", rc);
357                 return rc;
358         }
359
360         /* Allocate bitmap for CQs. The maximum number of CQs is bounded by
361          * twice the number of QPs.
362          */
363         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
364                                    num_cons, "CQ");
365         if (rc != ECORE_SUCCESS)
366         {
367                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
368                            "Failed to allocate cq bitmap, rc = %d\n", rc);
369                 return rc;
370         }
371
372         /* Allocate bitmap for the toggle bit of cq icids.
373          * We toggle the bit every time we create or resize a cq for a given icid.
374          * The maximum number of CQs is bounded by the number of connections we
375          * support (num_cons, i.e. num_qps for iWARP or 2 * num_qps for RoCE).
376          */
377         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
378                                    num_cons, "Toggle");
379         if (rc != ECORE_SUCCESS)
380         {
381                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
382                            "Failed to allocate toggle bits, rc = %d\n", rc);
383                 return rc;
384         }
385
386         /* Allocate bitmap for itids */
387         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
388                                    p_rdma_info->num_mrs, "MR");
389         if (rc != ECORE_SUCCESS)
390         {
391                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
392                            "Failed to allocate itids bitmaps, rc = %d\n", rc);
393                 return rc;
394         }
395
396         /* Allocate bitmap for qps. */
397         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->qp_map,
398                                    p_rdma_info->num_qps, "QP");
399         if (rc != ECORE_SUCCESS)
400         {
401                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
402                            "Failed to allocate qp bitmap, rc = %d\n", rc);
403                 return rc;
404         }
405
406         /* Allocate bitmap for cids used for responders/requesters. */
407         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
408                                    "REAL CID");
409         if (rc != ECORE_SUCCESS)
410         {
411                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
412                            "Failed to allocate cid bitmap, rc = %d\n", rc);
413                 return rc;
414         }
415
416         /* The first SRQ follows the last XRC SRQ. This means that the
417          * SRQ IDs start from an offset equal to max_xrc_srqs.
418          */
419         p_rdma_info->srq_id_offset = (u16)ecore_cxt_get_xrc_srq_count(p_hwfn);
420         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrc_srq_map,
421                                    p_rdma_info->srq_id_offset, "XRC SRQ");
422         if (rc != ECORE_SUCCESS) {
423                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
424                            "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
425                 return rc;
426         }
427
428         /* Allocate bitmap for srqs */
429         p_rdma_info->num_srqs = ecore_cxt_get_srq_count(p_hwfn);
430         rc = ecore_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
431                                    p_rdma_info->num_srqs,
432                                    "SRQ");
433         if (rc != ECORE_SUCCESS) {
434                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
435                            "Failed to allocate srq bitmap, rc = %d\n", rc);
436
437                 return rc;
438         }
439
440         if (IS_IWARP(p_hwfn))
441                 rc = ecore_iwarp_alloc(p_hwfn);
442
443         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
444
445         return rc;
446 }
447
448 void ecore_rdma_bmap_free(struct ecore_hwfn *p_hwfn,
449                           struct ecore_bmap *bmap,
450                           bool check)
451 {
452         int weight, line, item, last_line, last_item;
453         u64 *pmap;
454
455         if (!bmap || !bmap->bitmap)
456                 return;
457
458         if (!check)
459                 goto end;
460
461         weight = OSAL_BITMAP_WEIGHT(bmap->bitmap, bmap->max_count);
462         if (!weight)
463                 goto end;
464
465         DP_NOTICE(p_hwfn, false,
466                   "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
467                   bmap->name, bmap->max_count, weight);
468
469         pmap = (u64 *)bmap->bitmap;
470         last_line = bmap->max_count / (64*8);
471         last_item = last_line * 8 + (((bmap->max_count % (64*8)) + 63) / 64);
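        /* Worked example: max_count = 1000 gives last_line = 1000 / 512 = 1
         * (one full 512-bit line) and last_item = 8 + (488 + 63) / 64 = 16,
         * so the unaligned tail occupies items 8..15.
         */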
472
473         /* print aligned non-zero lines, if any */
474         for (item = 0, line = 0; line < last_line; line++, item += 8) {
475                 if (OSAL_BITMAP_WEIGHT((unsigned long *)&pmap[item], 64*8))
476                         DP_NOTICE(p_hwfn, false,
477                                   "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
478                                   line, (unsigned long long)pmap[item],
479                                 (unsigned long long)pmap[item+1],
480                                 (unsigned long long)pmap[item+2],
481                                   (unsigned long long)pmap[item+3],
482                                 (unsigned long long)pmap[item+4],
483                                 (unsigned long long)pmap[item+5],
484                                   (unsigned long long)pmap[item+6],
485                                 (unsigned long long)pmap[item+7]);
486         }
487
488         /* print last unaligned non-zero line, if any */
489         if ((bmap->max_count % (64*8)) &&
490             (OSAL_BITMAP_WEIGHT((unsigned long *)&pmap[item],
491                                 bmap->max_count-item*64))) {
492                 u8 str_last_line[200] = { 0 };
493                 int  offset;
494
495                 offset = OSAL_SPRINTF(str_last_line, "line 0x%04x: ", line);
496                 for (; item < last_item; item++) {
497                         offset += OSAL_SPRINTF(str_last_line+offset,
498                                                "0x%016llx ",
499                                 (unsigned long long)pmap[item]);
500                 }
501                 DP_NOTICE(p_hwfn, false, "%s\n", str_last_line);
502         }
503
504 end:
505         OSAL_FREE(p_hwfn->p_dev, bmap->bitmap);
506         bmap->bitmap = OSAL_NULL;
507 }
508
509
510 void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn)
511 {
512         if (IS_IWARP(p_hwfn))
513                 ecore_iwarp_resc_free(p_hwfn);
514
515         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
516         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->qp_map, 1);
517         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
518         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1);
519         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
520         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
521         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
522         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
523         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
524         ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);
525
526         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info->port);
527         p_hwfn->p_rdma_info->port = OSAL_NULL;
528
529         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_rdma_info->dev);
530         p_hwfn->p_rdma_info->dev = OSAL_NULL;
531 }
532
533 static OSAL_INLINE void ecore_rdma_free_reserved_lkey(struct ecore_hwfn *p_hwfn)
534 {
535         ecore_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
536 }
537
538 static void ecore_rdma_free_ilt(struct ecore_hwfn *p_hwfn)
539 {
540         /* Free Connection CXT */
541         ecore_cxt_free_ilt_range(
542                 p_hwfn, ECORE_ELEM_CXT,
543                 ecore_cxt_get_proto_cid_start(p_hwfn,
544                                               p_hwfn->p_rdma_info->proto),
545                 ecore_cxt_get_proto_cid_count(p_hwfn,
546                                               p_hwfn->p_rdma_info->proto,
547                                               OSAL_NULL));
548
549         /* Free Task CXT (intentionally RoCE, as the task-id is shared between
550          * RoCE and iWARP)
551          */
552         ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
553                                  ecore_cxt_get_proto_tid_count(
554                                          p_hwfn, PROTOCOLID_ROCE));
555
556         /* Free TSDM CXT */
557         ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_SRQ, 0,
558                                  ecore_cxt_get_srq_count(p_hwfn));
559 }
560
561 static void ecore_rdma_free(struct ecore_hwfn *p_hwfn)
562 {
563         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "\n");
564
565         ecore_rdma_free_reserved_lkey(p_hwfn);
566
567         ecore_rdma_resc_free(p_hwfn);
568
569         ecore_rdma_free_ilt(p_hwfn);
570 }
571
572 static void ecore_rdma_get_guid(struct ecore_hwfn *p_hwfn, u8 *guid)
573 {
574         u8 mac_addr[6];
575
576         OSAL_MEMCPY(&mac_addr[0], &p_hwfn->hw_info.hw_mac_addr[0], ETH_ALEN);
577         guid[0] = mac_addr[0] ^ 2;
578         guid[1] = mac_addr[1];
579         guid[2] = mac_addr[2];
580         guid[3] = 0xff;
581         guid[4] = 0xfe;
582         guid[5] = mac_addr[3];
583         guid[6] = mac_addr[4];
584         guid[7] = mac_addr[5];
585 }
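
/* Example: a MAC of 00:0e:1e:aa:bb:cc yields the EUI-64 style GUID
 * 02:0e:1e:ff:fe:aa:bb:cc - bit 1 of the first octet is flipped and
 * ff:fe is spliced into the middle.
 */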
586
587
588 static void ecore_rdma_init_events(
589         struct ecore_hwfn *p_hwfn,
590         struct ecore_rdma_start_in_params *params)
591 {
592         struct ecore_rdma_events *events;
593
594         events = &p_hwfn->p_rdma_info->events;
595
596         events->unaffiliated_event = params->events->unaffiliated_event;
597         events->affiliated_event = params->events->affiliated_event;
598         events->context = params->events->context;
599 }
600
601 static void ecore_rdma_init_devinfo(
602         struct ecore_hwfn *p_hwfn,
603         struct ecore_rdma_start_in_params *params)
604 {
605         struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
606         u32 pci_status_control;
607
608         /* Vendor specific information */
609         dev->vendor_id = p_hwfn->p_dev->vendor_id;
610         dev->vendor_part_id = p_hwfn->p_dev->device_id;
611         dev->hw_ver = 0;
612         dev->fw_ver = STORM_FW_VERSION;
613
614         ecore_rdma_get_guid(p_hwfn, (u8 *)(&dev->sys_image_guid));
615         dev->node_guid = dev->sys_image_guid;
616
617         dev->max_sge = OSAL_MIN_T(u32, RDMA_MAX_SGE_PER_SQ_WQE,
618                                   RDMA_MAX_SGE_PER_RQ_WQE);
619
620         if (p_hwfn->p_dev->rdma_max_sge) {
621                 dev->max_sge = OSAL_MIN_T(u32,
622                                      p_hwfn->p_dev->rdma_max_sge,
623                                      dev->max_sge);
624         }
625
626         /* Set these values according to configuration.
627          * MAX SGE for SRQ is not defined by FW for now, so
628          * define it in the driver.
629          * TODO: Get this value from FW.
630          */
631         dev->max_srq_sge = ECORE_RDMA_MAX_SGE_PER_SRQ_WQE;
632         if (p_hwfn->p_dev->rdma_max_srq_sge) {
633                 dev->max_srq_sge = OSAL_MIN_T(u32,
634                                      p_hwfn->p_dev->rdma_max_srq_sge,
635                                      dev->max_srq_sge);
636         }
637
638         dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
639         dev->max_inline = (p_hwfn->p_dev->rdma_max_inline) ?
640                 OSAL_MIN_T(u32,
641                            p_hwfn->p_dev->rdma_max_inline,
642                            dev->max_inline) :
643                         dev->max_inline;
644
645         dev->max_wqe = ECORE_RDMA_MAX_WQE;
646         dev->max_cnq = (u8)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ);
647
648         /* The number of QPs may be higher than ECORE_ROCE_MAX_QPS because
649          * it is up-aligned to 16 and then to the ILT page size within ecore cxt.
650          * This is OK in terms of ILT but we don't want to configure the FW
651          * above its abilities.
652          */
653         dev->max_qp = OSAL_MIN_T(u64, ROCE_MAX_QPS,
654                              p_hwfn->p_rdma_info->num_qps);
655
656         /* CQs use the same icids that QPs use, hence they are limited by the
657          * number of icids. There are two icids per QP.
658          */
659         dev->max_cq = dev->max_qp * 2;
660
661         /* The number of mrs is smaller by 1 since the first is reserved */
662         dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
663         dev->max_mr_size = ECORE_RDMA_MAX_MR_SIZE;
664         /* The maximum CQE capacity per CQ supported.
665          * The max number of cqes assumes a two-layer pbl:
666          * 8 is the pointer size in bytes,
667          * 32 is the size of a cq element in bytes
668          */
669         if (params->roce.cq_mode == ECORE_RDMA_CQ_MODE_32_BITS)
670                 dev->max_cqe = ECORE_RDMA_MAX_CQE_32_BIT;
671         else
672                 dev->max_cqe = ECORE_RDMA_MAX_CQE_16_BIT;
673
674         dev->max_mw = 0;
675         dev->max_fmr = ECORE_RDMA_MAX_FMR;
676         dev->max_mr_mw_fmr_pbl = (OSAL_PAGE_SIZE/8) * (OSAL_PAGE_SIZE/8);
677         dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * OSAL_PAGE_SIZE;
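        /* Worked example (assuming 4K pages): (4096 / 8)^2 = 262,144 pbl
         * entries, for a maximum region size of 262,144 * 4K = 1 GB.
         */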
678         dev->max_pkey = ECORE_RDMA_MAX_P_KEY;
679         /* Right now we don't take any parameters from the user,
680          * so assign the predefined num_srqs to max_srq.
681          */
682         dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
683
684         /* SRQ WQE size */
685         dev->max_srq_wr = ECORE_RDMA_MAX_SRQ_WQE_ELEM;
686
687         dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
688                                           (RDMA_RESP_RD_ATOMIC_ELM_SIZE*2);
689         dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
690                                          RDMA_REQ_RD_ATOMIC_ELM_SIZE;
691
692         dev->max_dev_resp_rd_atomic_resc =
693                 dev->max_qp_resp_rd_atomic_resc * p_hwfn->p_rdma_info->num_qps;
694         dev->page_size_caps = ECORE_RDMA_PAGE_SIZE_CAPS;
695         dev->dev_ack_delay = ECORE_RDMA_ACK_DELAY;
696         dev->max_pd = RDMA_MAX_PDS;
697         dev->max_ah = dev->max_qp;
698         dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, ECORE_RDMA_STATS_QUEUE);
699
700         /* Set capabilities */
701         dev->dev_caps = 0;
702         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_RNR_NAK, 1);
703         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
704         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
705         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_RESIZE_CQ, 1);
706         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
707         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
708         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_ZBVA, 1);
709         SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
710
711         /* Check atomic operations support in PCI configuration space. */
712         OSAL_PCI_READ_CONFIG_DWORD(p_hwfn->p_dev,
713                                    PCICFG_DEVICE_STATUS_CONTROL_2,
714                                    &pci_status_control);
715
716         if (pci_status_control &
717             PCICFG_DEVICE_STATUS_CONTROL_2_ATOMIC_REQ_ENABLE)
718                 SET_FIELD(dev->dev_caps, ECORE_RDMA_DEV_CAP_ATOMIC_OP, 1);
719
720         if (IS_IWARP(p_hwfn))
721                 ecore_iwarp_init_devinfo(p_hwfn);
722 }
723
724 static void ecore_rdma_init_port(
725         struct ecore_hwfn *p_hwfn)
726 {
727         struct ecore_rdma_port *port = p_hwfn->p_rdma_info->port;
728         struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
729
730         port->port_state = p_hwfn->mcp_info->link_output.link_up ?
731                 ECORE_RDMA_PORT_UP : ECORE_RDMA_PORT_DOWN;
732
733         port->max_msg_size = OSAL_MIN_T(u64,
734                                    (dev->max_mr_mw_fmr_size *
735                                     p_hwfn->p_dev->rdma_max_sge),
736                                         ((u64)1 << 31));
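        /* The ((u64)1 << 31) term caps max_msg_size at 2 GB. */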
737
738         port->pkey_bad_counter = 0;
739 }
740
741 static enum _ecore_status_t ecore_rdma_init_hw(
742         struct ecore_hwfn *p_hwfn,
743         struct ecore_ptt *p_ptt)
744 {
745         u32 ll2_ethertype_en;
746
747         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Initializing HW\n");
748         p_hwfn->b_rdma_enabled_in_prs = false;
749
750         if (IS_IWARP(p_hwfn))
751                 return ecore_iwarp_init_hw(p_hwfn, p_ptt);
752
753         ecore_wr(p_hwfn,
754                  p_ptt,
755                  PRS_REG_ROCE_DEST_QP_MAX_PF,
756                  0);
757
758         p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
759
760         /* We delay writing to this reg until the first cid is allocated. See
761          * the ecore_cxt_dynamic_ilt_alloc function for more details.
762          */
763
764         ll2_ethertype_en = ecore_rd(p_hwfn,
765                              p_ptt,
766                              PRS_REG_LIGHT_L2_ETHERTYPE_EN);
767         ecore_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
768                  (ll2_ethertype_en | 0x01));
769
770 #ifndef REAL_ASIC_ONLY
771         if (ECORE_IS_BB_A0(p_hwfn->p_dev) && ECORE_IS_CMT(p_hwfn->p_dev)) {
772                 ecore_wr(p_hwfn,
773                          p_ptt,
774                          NIG_REG_LLH_ENG_CLS_ENG_ID_TBL,
775                          0);
776                 ecore_wr(p_hwfn,
777                          p_ptt,
778                          NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 4,
779                          0);
780         }
781 #endif
782
783         if (ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2)
784         {
785                 DP_NOTICE(p_hwfn,
786                           true,
787                           "The first RoCE cid should be even\n");
788                 return ECORE_UNKNOWN_ERROR;
789         }
790
791         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Initializing HW - Done\n");
792         return ECORE_SUCCESS;
793 }
794
795 static enum _ecore_status_t
796 ecore_rdma_start_fw(struct ecore_hwfn *p_hwfn,
797 #ifdef CONFIG_DCQCN
798                     struct ecore_ptt *p_ptt,
799 #else
800                     struct ecore_ptt OSAL_UNUSED *p_ptt,
801 #endif
802                     struct ecore_rdma_start_in_params *params)
803 {
804         struct rdma_init_func_ramrod_data *p_ramrod;
805         struct rdma_init_func_hdr *pheader;
806         struct ecore_rdma_info *p_rdma_info;
807         struct ecore_sp_init_data init_data;
808         struct ecore_spq_entry *p_ent;
809         u16 igu_sb_id, sb_id;
810         u8 ll2_queue_id;
811         u32 cnq_id;
812         enum _ecore_status_t rc;
813
814         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Starting FW\n");
815
816         p_rdma_info = p_hwfn->p_rdma_info;
817
818         /* Save the number of cnqs for the function close ramrod */
819         p_rdma_info->num_cnqs = params->desired_cnq;
820
821         /* Get SPQ entry */
822         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
823         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
824         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
825
826         rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
827                                    p_rdma_info->proto, &init_data);
828         if (rc != ECORE_SUCCESS)
829                 return rc;
830
831         if (IS_IWARP(p_hwfn)) {
832                 ecore_iwarp_init_fw_ramrod(p_hwfn,
833                                            &p_ent->ramrod.iwarp_init_func);
834                 p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
835         } else {
836
837 #ifdef CONFIG_DCQCN
838                 rc = ecore_roce_dcqcn_cfg(p_hwfn, &params->roce.dcqcn_params,
839                                           &p_ent->ramrod.roce_init_func, p_ptt);
840                 if (rc != ECORE_SUCCESS) {
841                         DP_NOTICE(p_hwfn, false,
842                                   "Failed to configure DCQCN. rc = %d.\n", rc);
843                         return rc;
844                 }
845 #endif
846                 p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
847
848                 /* The ll2_queue_id is used only for UD QPs */
849                 ll2_queue_id = ecore_ll2_handle_to_queue_id(
850                         p_hwfn, params->roce.ll2_handle);
851                 p_ent->ramrod.roce_init_func.roce.ll2_queue_id = ll2_queue_id;
852
853         }
854
855         pheader = &p_ramrod->params_header;
856         pheader->cnq_start_offset = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM);
857         pheader->num_cnqs = params->desired_cnq;
858
859         /* The first SRQ ILT page is used for XRC SRQs and all the following
860          * pages contain regular SRQs. Hence the first regular SRQ ID is the
861          * maximum number of XRC SRQs.
862          */
863         pheader->first_reg_srq_id = p_rdma_info->srq_id_offset;
864         pheader->reg_srq_base_addr =
865                 ecore_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
866
867         if (params->roce.cq_mode == ECORE_RDMA_CQ_MODE_16_BITS)
868                 pheader->cq_ring_mode = 1; /* 1=16 bits */
869         else
870                 pheader->cq_ring_mode = 0; /* 0=32 bits */
871
872         for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++)
873         {
874                 sb_id = (u16)OSAL_GET_RDMA_SB_ID(p_hwfn, cnq_id);
875                 igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
876                 p_ramrod->cnq_params[cnq_id].sb_num =
877                         OSAL_CPU_TO_LE16(igu_sb_id);
878
879                 p_ramrod->cnq_params[cnq_id].sb_index =
880                         p_hwfn->pf_params.rdma_pf_params.gl_pi;
881
882                 p_ramrod->cnq_params[cnq_id].num_pbl_pages =
883                         params->cnq_pbl_list[cnq_id].num_pbl_pages;
884
885                 p_ramrod->cnq_params[cnq_id].pbl_base_addr.hi =
886                         DMA_HI_LE(params->cnq_pbl_list[cnq_id].pbl_ptr);
887                 p_ramrod->cnq_params[cnq_id].pbl_base_addr.lo =
888                         DMA_LO_LE(params->cnq_pbl_list[cnq_id].pbl_ptr);
889
890                 /* we arbitrarily decide that cnq_id will be used as the qz_offset */
891                 p_ramrod->cnq_params[cnq_id].queue_zone_num =
892                         OSAL_CPU_TO_LE16(p_rdma_info->queue_zone_base + cnq_id);
893         }
894
895         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
896
897         return rc;
898 }
899
900 enum _ecore_status_t ecore_rdma_alloc_tid(void  *rdma_cxt,
901                                           u32   *itid)
902 {
903         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
904         enum _ecore_status_t rc;
905
906         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocate TID\n");
907
908         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
909         rc = ecore_rdma_bmap_alloc_id(p_hwfn,
910                                       &p_hwfn->p_rdma_info->tid_map,
911                                       itid);
912         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
913         if (rc != ECORE_SUCCESS) {
914                 DP_NOTICE(p_hwfn, false, "Failed in allocating tid\n");
915                 goto out;
916         }
917
918         rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_TASK, *itid);
919 out:
920         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
921         return rc;
922 }
923
924 static OSAL_INLINE enum _ecore_status_t ecore_rdma_reserve_lkey(
925                 struct ecore_hwfn *p_hwfn)
926 {
927         struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
928
929         /* Tid 0 will be used as the key for "reserved MR".
930          * The driver should allocate memory for it so it can be loaded but no
931          * ramrod should be passed on it.
932          */
933         ecore_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
934         if (dev->reserved_lkey != RDMA_RESERVED_LKEY)
935         {
936                 DP_NOTICE(p_hwfn, true,
937                           "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
938                 return ECORE_INVAL;
939         }
940
941         return ECORE_SUCCESS;
942 }
943
944 static enum _ecore_status_t ecore_rdma_setup(struct ecore_hwfn    *p_hwfn,
945                                 struct ecore_ptt                  *p_ptt,
946                                 struct ecore_rdma_start_in_params *params)
947 {
948         enum _ecore_status_t rc = 0;
949
950         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA setup\n");
951
952         ecore_rdma_init_devinfo(p_hwfn, params);
953         ecore_rdma_init_port(p_hwfn);
954         ecore_rdma_init_events(p_hwfn, params);
955
956         rc = ecore_rdma_reserve_lkey(p_hwfn);
957         if (rc != ECORE_SUCCESS)
958                 return rc;
959
960         rc = ecore_rdma_init_hw(p_hwfn, p_ptt);
961         if (rc != ECORE_SUCCESS)
962                 return rc;
963
964         if (IS_IWARP(p_hwfn)) {
965                 rc = ecore_iwarp_setup(p_hwfn, params);
966                 if (rc != ECORE_SUCCESS)
967                         return rc;
968         } else {
969                 rc = ecore_roce_setup(p_hwfn);
970                 if (rc != ECORE_SUCCESS)
971                         return rc;
972         }
973
974         return ecore_rdma_start_fw(p_hwfn, p_ptt, params);
975 }
976
977
978 enum _ecore_status_t ecore_rdma_stop(void *rdma_cxt)
979 {
980         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
981         struct rdma_close_func_ramrod_data *p_ramrod;
982         struct ecore_sp_init_data init_data;
983         struct ecore_spq_entry *p_ent;
984         struct ecore_ptt *p_ptt;
985         u32 ll2_ethertype_en;
986         enum _ecore_status_t rc = ECORE_TIMEOUT;
987
988         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA stop\n");
989
990         rc = ecore_rdma_deactivate(p_hwfn);
991         if (rc != ECORE_SUCCESS)
992                 return rc;
993
994         p_ptt = ecore_ptt_acquire(p_hwfn);
995         if (!p_ptt) {
996                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Failed to acquire PTT\n");
997                 return rc;
998         }
999
1000 #ifdef CONFIG_DCQCN
1001         ecore_roce_stop_rl(p_hwfn);
1002 #endif
1003
1004         /* Disable RoCE search */
1005         ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
1006         p_hwfn->b_rdma_enabled_in_prs = false;
1007
1008         ecore_wr(p_hwfn,
1009                  p_ptt,
1010                  PRS_REG_ROCE_DEST_QP_MAX_PF,
1011                  0);
1012
1013         ll2_ethertype_en = ecore_rd(p_hwfn,
1014                                     p_ptt,
1015                                     PRS_REG_LIGHT_L2_ETHERTYPE_EN);
1016
1017         ecore_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
1018                  (ll2_ethertype_en & 0xFFFE));
1019
1020 #ifndef REAL_ASIC_ONLY
1021         /* INTERNAL: In CMT mode, re-initialize the nig to direct packets to both
1022          * engines for L2 performance; RoCE requires all traffic to go just to
1023          * engine 0.
1024          */
1025         if (ECORE_IS_BB_A0(p_hwfn->p_dev) && ECORE_IS_CMT(p_hwfn->p_dev)) {
1026                 DP_ERR(p_hwfn->p_dev,
1027                        "On Everest 4 Big Bear Board revision A0 when RoCE driver is loaded L2 performance is sub-optimal (all traffic is routed to engine 0). For optimal L2 results either remove RoCE driver or use board revision B0\n");
1028
1029                 ecore_wr(p_hwfn,
1030                          p_ptt,
1031                          NIG_REG_LLH_ENG_CLS_ENG_ID_TBL,
1032                          0x55555555);
1033                 ecore_wr(p_hwfn,
1034                          p_ptt,
1035                          NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
1036                          0x55555555);
1037         }
1038 #endif
1039
1040         if (IS_IWARP(p_hwfn)) {
1041                 rc = ecore_iwarp_stop(p_hwfn);
1042                 if (rc != ECORE_SUCCESS) {
1043                         ecore_ptt_release(p_hwfn, p_ptt);
1044                         return 0;
1045                 }
1046         } else {
1047                 rc = ecore_roce_stop(p_hwfn);
1048                 if (rc != ECORE_SUCCESS) {
1049                         ecore_ptt_release(p_hwfn, p_ptt);
1050                         return 0;
1051                 }
1052         }
1053
1054         ecore_ptt_release(p_hwfn, p_ptt);
1055
1056         /* Get SPQ entry */
1057         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1058         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1059         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1060
1061         /* Stop RoCE */
1062         rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
1063                                    p_hwfn->p_rdma_info->proto, &init_data);
1064         if (rc != ECORE_SUCCESS)
1065                 goto out;
1066
1067         p_ramrod = &p_ent->ramrod.rdma_close_func;
1068
1069         p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
1070         p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM);
1071
1072         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1073
1074 out:
1075         ecore_rdma_free(p_hwfn);
1076
1077         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
1078         return rc;
1079 }
1080
1081 enum _ecore_status_t ecore_rdma_add_user(void                 *rdma_cxt,
1082                         struct ecore_rdma_add_user_out_params *out_params)
1083 {
1084         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1085         u32 dpi_start_offset;
1086         u32 returned_id = 0;
1087         enum _ecore_status_t rc;
1088
1089         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Adding User\n");
1090
1091         /* Allocate DPI */
1092         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1093         rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
1094                                       &returned_id);
1095         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1096
1097         if (rc != ECORE_SUCCESS)
1098                 DP_NOTICE(p_hwfn, false, "Failed in allocating dpi\n");
1099
1100         out_params->dpi = (u16)returned_id;
1101
1102         /* Calculate the corresponding DPI address */
1103         dpi_start_offset = p_hwfn->dpi_start_offset;
1104
1105         out_params->dpi_addr = (u64)(osal_int_ptr_t)((u8 OSAL_IOMEM*)p_hwfn->doorbells +
1106                                                      dpi_start_offset +
1107                                                      ((out_params->dpi) * p_hwfn->dpi_size));
1108
1109         out_params->dpi_phys_addr = p_hwfn->db_phys_addr + dpi_start_offset +
1110                                     out_params->dpi * p_hwfn->dpi_size;
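        /* Worked example (assuming dpi_size = 4K): dpi 3 maps to
         * dpi_start_offset + 3 * 4096, i.e. 12K into the DPI region of the
         * doorbell BAR, and dpi_phys_addr is the same offset from db_phys_addr.
         */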
1111
1112         out_params->dpi_size = p_hwfn->dpi_size;
1113         out_params->wid_count = p_hwfn->wid_count;
1114
1115         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
1116         return rc;
1117 }
1118
1119 struct ecore_rdma_port *ecore_rdma_query_port(void      *rdma_cxt)
1120 {
1121         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1122         struct ecore_rdma_port *p_port = p_hwfn->p_rdma_info->port;
1123         struct ecore_mcp_link_state *p_link_output;
1124
1125         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA Query port\n");
1126
1127         /* The link state is saved only for the leading hwfn */
1128         p_link_output =
1129                 &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
1130
1131         /* Link may have changed... */
1132         p_port->port_state = p_link_output->link_up ? ECORE_RDMA_PORT_UP
1133                                                     : ECORE_RDMA_PORT_DOWN;
1134
1135         p_port->link_speed = p_link_output->speed;
1136
1137         p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
1138
1139         return p_port;
1140 }
1141
1142 struct ecore_rdma_device *ecore_rdma_query_device(void  *rdma_cxt)
1143 {
1144         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1145
1146         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Query device\n");
1147
1148         /* Return struct with device parameters */
1149         return p_hwfn->p_rdma_info->dev;
1150 }
1151
1152 void ecore_rdma_free_tid(void   *rdma_cxt,
1153                          u32    itid)
1154 {
1155         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1156
1157         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "itid = %08x\n", itid);
1158
1159         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1160         ecore_bmap_release_id(p_hwfn,
1161                               &p_hwfn->p_rdma_info->tid_map,
1162                               itid);
1163         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1164 }
1165
1166 void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
1167 {
1168         struct ecore_hwfn *p_hwfn;
1169         u16 qz_num;
1170         u32 addr;
1171
1172         p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1173
1174         if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
1175                 DP_NOTICE(p_hwfn, false,
1176                           "queue zone offset %d is too large (max is %d)\n",
1177                           qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
1178                 return;
1179         }
1180
1181         qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
1182         addr = GTT_BAR0_MAP_REG_USDM_RAM +
1183                USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
1184
1185         REG_WR16(p_hwfn, addr, prod);
1186
1187         /* keep prod updates ordered */
1188         OSAL_WMB(p_hwfn->p_dev);
1189 }
1190
1191 enum _ecore_status_t ecore_rdma_alloc_pd(void   *rdma_cxt,
1192                                          u16    *pd)
1193 {
1194         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1195         u32                  returned_id;
1196         enum _ecore_status_t rc;
1197
1198         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc PD\n");
1199
1200         /* Allocates an unused protection domain */
1201         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1202         rc = ecore_rdma_bmap_alloc_id(p_hwfn,
1203                                       &p_hwfn->p_rdma_info->pd_map,
1204                                       &returned_id);
1205         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1206         if (rc != ECORE_SUCCESS)
1207                 DP_NOTICE(p_hwfn, false, "Failed in allocating pd id\n");
1208
1209         *pd = (u16)returned_id;
1210
1211         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
1212         return rc;
1213 }
1214
1215 void ecore_rdma_free_pd(void    *rdma_cxt,
1216                         u16     pd)
1217 {
1218         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1219
1220         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "pd = %08x\n", pd);
1221
1222         /* Returns a previously allocated protection domain for reuse */
1223         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1224         ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
1225         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1226 }
1227
1228 enum _ecore_status_t ecore_rdma_alloc_xrcd(void *rdma_cxt,
1229                                            u16  *xrcd_id)
1230 {
1231         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1232         u32                  returned_id;
1233         enum _ecore_status_t rc;
1234
1235         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc XRCD\n");
1236
1237         /* Allocates an unused XRC domain */
1238         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1239         rc = ecore_rdma_bmap_alloc_id(p_hwfn,
1240                                       &p_hwfn->p_rdma_info->xrcd_map,
1241                                       &returned_id);
1242         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1243         if (rc != ECORE_SUCCESS)
1244                 DP_NOTICE(p_hwfn, false, "Failed in allocating xrcd id\n");
1245
1246         *xrcd_id = (u16)returned_id;
1247
1248         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
1249         return rc;
1250 }
1251
1252 void ecore_rdma_free_xrcd(void  *rdma_cxt,
1253                           u16   xrcd_id)
1254 {
1255         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1256
1257         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);
1258
1259         /* Returns a previously allocated XRC domain for reuse */
1260         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1261         ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
1262         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1263 }
1264
1265 static enum ecore_rdma_toggle_bit
1266 ecore_rdma_toggle_bit_create_resize_cq(struct ecore_hwfn *p_hwfn,
1267                                        u16 icid)
1268 {
1269         struct ecore_rdma_info *p_info = p_hwfn->p_rdma_info;
1270         enum ecore_rdma_toggle_bit toggle_bit;
1271         u32 bmap_id;
1272
1273         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", icid);
1274
1275         /* The function toggles the bit related to a given icid
1276          * and returns the toggle bit's new value.
1277          */
1278         bmap_id = icid - ecore_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
1279
1280         OSAL_SPIN_LOCK(&p_info->lock);
1281         toggle_bit = !OSAL_TEST_AND_FLIP_BIT(bmap_id, p_info->toggle_bits.bitmap);
1282         OSAL_SPIN_UNLOCK(&p_info->lock);
1283
1284         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ECORE_RDMA_TOGGLE_BIT_= %d\n",
1285                    toggle_bit);
1286
1287         return toggle_bit;
1288 }
1289
1290 enum _ecore_status_t ecore_rdma_create_cq(void                        *rdma_cxt,
1291                                 struct ecore_rdma_create_cq_in_params *params,
1292                                 u16                                   *icid)
1293 {
1294         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1295         struct ecore_rdma_info *p_info = p_hwfn->p_rdma_info;
1296         struct rdma_create_cq_ramrod_data       *p_ramrod;
1297         enum ecore_rdma_toggle_bit              toggle_bit;
1298         struct ecore_sp_init_data               init_data;
1299         struct ecore_spq_entry                  *p_ent;
1300         enum _ecore_status_t                    rc;
1301         u32                                     returned_id;
1302
1303         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "cq_handle = %08x%08x\n",
1304                    params->cq_handle_hi, params->cq_handle_lo);
1305
1306         /* Allocate icid */
1307         OSAL_SPIN_LOCK(&p_info->lock);
1308         rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
1309         OSAL_SPIN_UNLOCK(&p_info->lock);
1310
1311         if (rc != ECORE_SUCCESS)
1312         {
1313                 DP_NOTICE(p_hwfn, false, "Can't create CQ, rc = %d\n", rc);
1314                 return rc;
1315         }
1316
1317         *icid = (u16)(returned_id +
1318                       ecore_cxt_get_proto_cid_start(
1319                               p_hwfn, p_info->proto));
1320
1321         /* Check if icid requires a page allocation */
1322         rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, *icid);
1323         if (rc != ECORE_SUCCESS)
1324                 goto err;
1325
1326         /* Get SPQ entry */
1327         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1328         init_data.cid = *icid;
1329         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1330         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1331
1332         /* Send create CQ ramrod */
1333         rc = ecore_sp_init_request(p_hwfn, &p_ent,
1334                                    RDMA_RAMROD_CREATE_CQ,
1335                                    p_info->proto, &init_data);
1336         if (rc != ECORE_SUCCESS)
1337                 goto err;
1338
1339         p_ramrod = &p_ent->ramrod.rdma_create_cq;
1340
1341         p_ramrod->cq_handle.hi = OSAL_CPU_TO_LE32(params->cq_handle_hi);
1342         p_ramrod->cq_handle.lo = OSAL_CPU_TO_LE32(params->cq_handle_lo);
1343         p_ramrod->dpi = OSAL_CPU_TO_LE16(params->dpi);
1344         p_ramrod->is_two_level_pbl = params->pbl_two_level;
1345         p_ramrod->max_cqes = OSAL_CPU_TO_LE32(params->cq_size);
1346         DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
1347         p_ramrod->pbl_num_pages = OSAL_CPU_TO_LE16(params->pbl_num_pages);
1348         p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, ECORE_RDMA_CNQ_RAM)
1349                         + params->cnq_id;
1350         p_ramrod->int_timeout = params->int_timeout;
1351         /* INTERNAL: Two layer PBL is currently not supported, ignoring next line */
1352         /* INTERNAL: p_ramrod->pbl_log_page_size = params->pbl_page_size_log - 12; */
1353
1354         /* toggle the bit for every resize or create cq for a given icid */
1355         toggle_bit = ecore_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
1356
1357         p_ramrod->toggle_bit = toggle_bit;
1358
1359         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1360         if (rc != ECORE_SUCCESS) {
1361                 /* restore toggle bit */
1362                 ecore_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
1363                 goto err;
1364         }
1365
1366         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Created CQ, rc = %d\n", rc);
1367         return rc;
1368
1369 err:
1370         /* release allocated icid */
1371         OSAL_SPIN_LOCK(&p_info->lock);
1372         ecore_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
1373         OSAL_SPIN_UNLOCK(&p_info->lock);
1374
1375         DP_NOTICE(p_hwfn, false, "Create CQ failed, rc = %d\n", rc);
1376
1377         return rc;
1378 }
1379
1380 enum _ecore_status_t ecore_rdma_destroy_cq(void                 *rdma_cxt,
1381                         struct ecore_rdma_destroy_cq_in_params  *in_params,
1382                         struct ecore_rdma_destroy_cq_out_params *out_params)
1383 {
1384         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1385         struct rdma_destroy_cq_output_params *p_ramrod_res;
1386         struct rdma_destroy_cq_ramrod_data      *p_ramrod;
1387         struct ecore_sp_init_data               init_data;
1388         struct ecore_spq_entry                  *p_ent;
1389         dma_addr_t                              ramrod_res_phys;
1390         enum _ecore_status_t                    rc = ECORE_NOMEM;
1391
1392         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", in_params->icid);
1393
1394         p_ramrod_res = (struct rdma_destroy_cq_output_params *)
1395                         OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
1396                                 sizeof(struct rdma_destroy_cq_output_params));
1397         if (!p_ramrod_res)
1398         {
1399                 DP_NOTICE(p_hwfn, false,
1400                           "ecore destroy cq failed: cannot allocate memory (ramrod)\n");
1401                 return rc;
1402         }
1403
1404         /* Get SPQ entry */
1405         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1406         init_data.cid = in_params->icid;
1407         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1408         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1409
1410         /* Send destroy CQ ramrod */
1411         rc = ecore_sp_init_request(p_hwfn, &p_ent,
1412                                    RDMA_RAMROD_DESTROY_CQ,
1413                                    p_hwfn->p_rdma_info->proto, &init_data);
1414         if (rc != ECORE_SUCCESS)
1415                 goto err;
1416
1417         p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
1418         DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1419
1420         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1421         if (rc != ECORE_SUCCESS)
1422                 goto err;
1423
1424         out_params->num_cq_notif =
1425                 OSAL_LE16_TO_CPU(p_ramrod_res->cnq_num);
1426
1427         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
1428                                sizeof(struct rdma_destroy_cq_output_params));
1429
1430         /* Free icid */
1431         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1432
1433         ecore_bmap_release_id(p_hwfn,
1434                               &p_hwfn->p_rdma_info->cq_map,
1435                 (in_params->icid - ecore_cxt_get_proto_cid_start(
1436                         p_hwfn, p_hwfn->p_rdma_info->proto)));
1437
1438         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1439
1440         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
1441         return rc;
1442
1443 err:
1444         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
1445                                sizeof(struct rdma_destroy_cq_output_params));
1446
1447         return rc;
1448 }
1449
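/*
 * Packs a 6-byte MAC address into three 16-bit little-endian words for the
 * firmware. As an illustration, aa:bb:cc:dd:ee:ff is folded into
 * {0xaabb, 0xccdd, 0xeeff} before the CPU-to-LE16 conversion.
 */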
1450 void ecore_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_ecore_mac)
1451 {
1452         p_fw_mac[0] = OSAL_CPU_TO_LE16((p_ecore_mac[0] << 8) + p_ecore_mac[1]);
1453         p_fw_mac[1] = OSAL_CPU_TO_LE16((p_ecore_mac[2] << 8) + p_ecore_mac[3]);
1454         p_fw_mac[2] = OSAL_CPU_TO_LE16((p_ecore_mac[4] << 8) + p_ecore_mac[5]);
1455 }
1456
1457
1458 enum _ecore_status_t ecore_rdma_query_qp(void                   *rdma_cxt,
1459                         struct ecore_rdma_qp                    *qp,
1460                         struct ecore_rdma_query_qp_out_params   *out_params)
1461
1462 {
1463         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1464         enum _ecore_status_t rc = ECORE_SUCCESS;
1465
1466         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);
1467
1468         /* The following fields are filled in from qp and not FW as they can't
1469          * be modified by FW
1470          */
1471         out_params->mtu = qp->mtu;
1472         out_params->dest_qp = qp->dest_qp;
1473         out_params->incoming_atomic_en = qp->incoming_atomic_en;
1474         out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
1475         out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
1476         out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
1477         out_params->dgid = qp->dgid;
1478         out_params->flow_label = qp->flow_label;
1479         out_params->hop_limit_ttl = qp->hop_limit_ttl;
1480         out_params->traffic_class_tos = qp->traffic_class_tos;
1481         out_params->timeout = qp->ack_timeout;
1482         out_params->rnr_retry = qp->rnr_retry_cnt;
1483         out_params->retry_cnt = qp->retry_cnt;
1484         out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
1485         out_params->pkey_index = 0;
1486         out_params->max_rd_atomic = qp->max_rd_atomic_req;
1487         out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
1488         out_params->sqd_async = qp->sqd_async;
1489
1490         if (IS_IWARP(p_hwfn))
1491                 rc = ecore_iwarp_query_qp(qp, out_params);
1492         else
1493                 rc = ecore_roce_query_qp(p_hwfn, qp, out_params);
1494
1495         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Query QP, rc = %d\n", rc);
1496         return rc;
1497 }
1498
1499
1500 enum _ecore_status_t ecore_rdma_destroy_qp(void *rdma_cxt,
1501                                            struct ecore_rdma_qp *qp,
1502                                            struct ecore_rdma_destroy_qp_out_params *out_params)
1503 {
1504         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1505         enum _ecore_status_t rc = ECORE_SUCCESS;
1506
1507         if (!rdma_cxt || !qp) {
1508                 DP_ERR(p_hwfn,
1509                        "ecore rdma destroy qp failed: invalid NULL input. rdma_cxt=%p, qp=%p\n",
1510                        rdma_cxt, qp);
1511                 return ECORE_INVAL;
1512         }
1513
1514         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x)\n", qp->icid);
1515
1516         if (IS_IWARP(p_hwfn))
1517                 rc = ecore_iwarp_destroy_qp(p_hwfn, qp);
1518         else
1519                 rc = ecore_roce_destroy_qp(p_hwfn, qp, out_params);
1520
1521         /* free qp params struct */
1522         OSAL_FREE(p_hwfn->p_dev, qp);
1523
1524         return rc;
1525 }
1526
1527
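/*
 * ecore_rdma_create_qp: validates the requested statistics queue (and, for
 * iWARP, the SQ/RQ PBL sizes against the shared queue page limits),
 * zero-allocates the ecore_rdma_qp and fills it from the input parameters.
 * iWARP QPs obtain their icid from ecore_iwarp_create_qp; RoCE QPs take a
 * qp_idx from the bitmap and derive icid/qpid from it.
 */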
1528 struct ecore_rdma_qp *ecore_rdma_create_qp(void                 *rdma_cxt,
1529                         struct ecore_rdma_create_qp_in_params   *in_params,
1530                         struct ecore_rdma_create_qp_out_params  *out_params)
1531 {
1532         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1533         struct ecore_rdma_qp *qp;
1534         u8 max_stats_queues;
1535         enum _ecore_status_t rc = 0;
1536
1537         if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
1538                 DP_ERR(p_hwfn->p_dev,
1539                        "ecore rdma create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
1540                        rdma_cxt,
1541                        in_params,
1542                        out_params);
1543                 return OSAL_NULL;
1544         }
1545
1546         /* Some sanity checks... */
1547         max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
1548         if (in_params->stats_queue >= max_stats_queues) {
1549                 DP_ERR(p_hwfn->p_dev,
1550                        "ecore rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
1551                        in_params->stats_queue, max_stats_queues);
1552                 return OSAL_NULL;
1553         }
1554
1555         if (IS_IWARP(p_hwfn)) {
1556                 if (in_params->sq_num_pages*sizeof(struct regpair) >
1557                     IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
1558                         DP_NOTICE(p_hwfn->p_dev, true, "Sq num pages: %d exceeds maximum\n",
1559                                   in_params->sq_num_pages);
1560                         return OSAL_NULL;
1561                 }
1562                 if (in_params->rq_num_pages*sizeof(struct regpair) >
1563                     IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
1564                         DP_NOTICE(p_hwfn->p_dev, true,
1565                                   "Rq num pages: %d exceeds maximum\n",
1566                                   in_params->rq_num_pages);
1567                         return OSAL_NULL;
1568                 }
1569         }
1570
1571         qp = OSAL_ZALLOC(p_hwfn->p_dev,
1572                          GFP_KERNEL,
1573                          sizeof(struct ecore_rdma_qp));
1574         if (!qp)
1575         {
1576                 DP_NOTICE(p_hwfn, false, "Failed to allocate ecore_rdma_qp\n");
1577                 return OSAL_NULL;
1578         }
1579
1580         qp->cur_state = ECORE_ROCE_QP_STATE_RESET;
1581 #ifdef CONFIG_ECORE_IWARP
1582         qp->iwarp_state = ECORE_IWARP_QP_STATE_IDLE;
1583 #endif
1584         qp->qp_handle.hi = OSAL_CPU_TO_LE32(in_params->qp_handle_hi);
1585         qp->qp_handle.lo = OSAL_CPU_TO_LE32(in_params->qp_handle_lo);
1586         qp->qp_handle_async.hi = OSAL_CPU_TO_LE32(in_params->qp_handle_async_hi);
1587         qp->qp_handle_async.lo = OSAL_CPU_TO_LE32(in_params->qp_handle_async_lo);
1588         qp->use_srq = in_params->use_srq;
1589         qp->signal_all = in_params->signal_all;
1590         qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
1591         qp->pd = in_params->pd;
1592         qp->dpi = in_params->dpi;
1593         qp->sq_cq_id = in_params->sq_cq_id;
1594         qp->sq_num_pages = in_params->sq_num_pages;
1595         qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
1596         qp->rq_cq_id = in_params->rq_cq_id;
1597         qp->rq_num_pages = in_params->rq_num_pages;
1598         qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
1599         qp->srq_id = in_params->srq_id;
1600         qp->req_offloaded = false;
1601         qp->resp_offloaded = false;
1602         /* e2e_flow_control cannot be done in case of S-RQ.
1603          * Refer to 9.7.7.2 End-to-End Flow Control section of IB spec
1604          */
1605         qp->e2e_flow_control_en = qp->use_srq ? false : true;
1606         qp->stats_queue = in_params->stats_queue;
1607         qp->qp_type = in_params->qp_type;
1608         qp->xrcd_id = in_params->xrcd_id;
1609
1610         if (IS_IWARP(p_hwfn)) {
1611                 rc = ecore_iwarp_create_qp(p_hwfn, qp, out_params);
1612                 qp->qpid = qp->icid;
1613         } else {
1614                 rc = ecore_roce_alloc_qp_idx(p_hwfn, &qp->qp_idx);
1615                 qp->icid = ECORE_ROCE_QP_TO_ICID(qp->qp_idx);
1616                 qp->qpid = ((0xFF << 16) | qp->icid);
1617         }
1618
1619         if (rc != ECORE_SUCCESS) {
1620                 OSAL_FREE(p_hwfn->p_dev, qp);
1621                 return OSAL_NULL;
1622         }
1623
1624         out_params->icid = qp->icid;
1625         out_params->qp_id = qp->qpid;
1626
1627         /* INTERNAL: max_sq_sges future use only */
1628
1629         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Create QP, rc = %d\n", rc);
1630         return qp;
1631 }
1632
1633 #define ECORE_RDMA_ECN_SHIFT 0
1634 #define ECORE_RDMA_ECN_MASK 0x3
1635 #define ECORE_RDMA_DSCP_SHIFT 2
1636 #define ECORE_RDMA_DSCP_MASK 0x3f
1637 #define ECORE_RDMA_VLAN_PRIO_SHIFT 13
1638 #define ECORE_RDMA_VLAN_PRIO_MASK 0x7
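/*
 * Field layout used by the global-config overrides below: traffic_class_tos
 * carries ECN in bits 1:0 and DSCP in bits 7:2, and vlan_id carries the
 * 802.1p priority in bits 15:13 (the PCP field of the VLAN TCI). For example,
 * SET_FIELD(qp->traffic_class_tos, ECORE_RDMA_DSCP, 46) places DSCP 46 in
 * bits 7:2 of the field.
 */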
1639 enum _ecore_status_t ecore_rdma_modify_qp(
1640         void *rdma_cxt,
1641         struct ecore_rdma_qp *qp,
1642         struct ecore_rdma_modify_qp_in_params *params)
1643 {
1644         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1645         enum ecore_roce_qp_state prev_state;
1646         enum _ecore_status_t     rc = ECORE_SUCCESS;
1647
1648         if (GET_FIELD(params->modify_flags,
1649                       ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN))
1650         {
1651                 qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
1652                 qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
1653                 qp->incoming_atomic_en = params->incoming_atomic_en;
1654         }
1655
1656         /* Update QP structure with the updated values */
1657         if (GET_FIELD(params->modify_flags,
1658                       ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE))
1659         {
1660                 qp->roce_mode = params->roce_mode;
1661         }
1662         if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_PKEY))
1663         {
1664                 qp->pkey = params->pkey;
1665         }
1666         if (GET_FIELD(params->modify_flags,
1667                       ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
1668         {
1669                 qp->e2e_flow_control_en = params->e2e_flow_control_en;
1670         }
1671         if (GET_FIELD(params->modify_flags,
1672                       ECORE_ROCE_MODIFY_QP_VALID_DEST_QP))
1673         {
1674                 qp->dest_qp = params->dest_qp;
1675         }
1676         if (GET_FIELD(params->modify_flags,
1677                       ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR))
1678         {
1679                 /* Indicates that the following parameters have changed:
1680                  * Traffic class, flow label, hop limit, source GID,
1681                  * destination GID, loopback indicator
1682                  */
1683                 qp->flow_label = params->flow_label;
1684                 qp->hop_limit_ttl = params->hop_limit_ttl;
1685
1686                 qp->sgid = params->sgid;
1687                 qp->dgid = params->dgid;
1688                 qp->udp_src_port = params->udp_src_port;
1689                 qp->vlan_id = params->vlan_id;
1690                 qp->traffic_class_tos = params->traffic_class_tos;
1691
1692                 /* apply global override values */
1693                 if (p_hwfn->p_rdma_info->glob_cfg.vlan_pri_en)
1694                         SET_FIELD(qp->vlan_id, ECORE_RDMA_VLAN_PRIO,
1695                                   p_hwfn->p_rdma_info->glob_cfg.vlan_pri);
1696
1697                 if (p_hwfn->p_rdma_info->glob_cfg.ecn_en)
1698                         SET_FIELD(qp->traffic_class_tos, ECORE_RDMA_ECN,
1699                                   p_hwfn->p_rdma_info->glob_cfg.ecn);
1700
1701                 if (p_hwfn->p_rdma_info->glob_cfg.dscp_en)
1702                         SET_FIELD(qp->traffic_class_tos, ECORE_RDMA_DSCP,
1703                                   p_hwfn->p_rdma_info->glob_cfg.dscp);
1704
1705                 qp->mtu = params->mtu;
1706
1707                 OSAL_MEMCPY((u8 *)&qp->remote_mac_addr[0],
1708                             (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
1709                 if (params->use_local_mac) {
1710                         OSAL_MEMCPY((u8 *)&qp->local_mac_addr[0],
1711                                     (u8 *)&params->local_mac_addr[0],
1712                                     ETH_ALEN);
1713                 } else {
1714                         OSAL_MEMCPY((u8 *)&qp->local_mac_addr[0],
1715                                     (u8 *)&p_hwfn->hw_info.hw_mac_addr,
1716                                     ETH_ALEN);
1717                 }
1718         }
1719         if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN))
1720         {
1721                 qp->rq_psn = params->rq_psn;
1722         }
1723         if (GET_FIELD(params->modify_flags, ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN))
1724         {
1725                 qp->sq_psn = params->sq_psn;
1726         }
1727         if (GET_FIELD(params->modify_flags,
1728                       ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
1729         {
1730                 qp->max_rd_atomic_req = params->max_rd_atomic_req;
1731         }
1732         if (GET_FIELD(params->modify_flags,
1733                       ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
1734         {
1735                 qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
1736         }
1737         if (GET_FIELD(params->modify_flags,
1738                       ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
1739         {
1740                 qp->ack_timeout = params->ack_timeout;
1741         }
1742         if (GET_FIELD(params->modify_flags,
1743                       ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT))
1744         {
1745                 qp->retry_cnt = params->retry_cnt;
1746         }
1747         if (GET_FIELD(params->modify_flags,
1748                       ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
1749         {
1750                 qp->rnr_retry_cnt = params->rnr_retry_cnt;
1751         }
1752         if (GET_FIELD(params->modify_flags,
1753                       ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
1754         {
1755                 qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
1756         }
1757
1758         qp->sqd_async = params->sqd_async;
1759
1760         prev_state = qp->cur_state;
1761         if (GET_FIELD(params->modify_flags,
1762                       ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE))
1763         {
1764                 qp->cur_state = params->new_state;
1765                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "qp->cur_state=%d\n",
1766                            qp->cur_state);
1767         }
1768
1769         if (qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_INI) {
1770                 qp->has_req = 1;
1771         } else if (qp->qp_type == ECORE_RDMA_QP_TYPE_XRC_TGT)
1772         {
1773                 qp->has_resp = 1;
1774         } else {
1775                 qp->has_req = 1;
1776                 qp->has_resp = 1;
1777         }
1778
1779         if (IS_IWARP(p_hwfn)) {
1780                 enum ecore_iwarp_qp_state new_state =
1781                         ecore_roce2iwarp_state(qp->cur_state);
1782
1783                 rc = ecore_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
1784         } else {
1785                 rc = ecore_roce_modify_qp(p_hwfn, qp, prev_state, params);
1786         }
1787
1788         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Modify QP, rc = %d\n", rc);
1789         return rc;
1790 }
1791
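/*
 * ecore_rdma_register_tid: builds a REGISTER_MR ramrod describing the memory
 * region (access flags, page-size logs expressed relative to 4KB, hence the
 * "- 12", the VA or the FBO for zero-based MRs, the PBL base and optional DIF
 * addresses), posts it in blocking mode and treats any fw_return_code other
 * than RDMA_RETURN_OK as a failure.
 */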
1792 enum _ecore_status_t ecore_rdma_register_tid(void                *rdma_cxt,
1793                         struct ecore_rdma_register_tid_in_params *params)
1794 {
1795         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1796         struct rdma_register_tid_ramrod_data *p_ramrod;
1797         struct ecore_sp_init_data            init_data;
1798         struct ecore_spq_entry               *p_ent;
1799         enum rdma_tid_type                   tid_type;
1800         u8                                   fw_return_code;
1801         enum _ecore_status_t                 rc;
1802
1803         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "itid = %08x\n", params->itid);
1804
1805         /* Get SPQ entry */
1806         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1807         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1808         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1809
1810         rc = ecore_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
1811                                    p_hwfn->p_rdma_info->proto, &init_data);
1812         if (rc != ECORE_SUCCESS) {
1813                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
1814                 return rc;
1815         }
1816
1817         if (p_hwfn->p_rdma_info->last_tid < params->itid) {
1818                 p_hwfn->p_rdma_info->last_tid = params->itid;
1819         }
1820
1821         p_ramrod = &p_ent->ramrod.rdma_register_tid;
1822
1823         p_ramrod->flags = 0;
1824         SET_FIELD(p_ramrod->flags,
1825                   RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
1826                   params->pbl_two_level);
1827
1828         SET_FIELD(p_ramrod->flags,
1829                   RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED,
1830                   params->zbva);
1831
1832         SET_FIELD(p_ramrod->flags,
1833                   RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR,
1834                   params->phy_mr);
1835
1836         /* Don't initialize D/C field, as it may override other bits. */
1837         if (!(params->tid_type == ECORE_RDMA_TID_FMR) &&
1838             !(params->dma_mr))
1839                 SET_FIELD(p_ramrod->flags,
1840                           RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
1841                           params->page_size_log - 12);
1842
1843         SET_FIELD(p_ramrod->flags,
1844                   RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
1845                   params->remote_read);
1846
1847         SET_FIELD(p_ramrod->flags,
1848                   RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
1849                   params->remote_write);
1850
1851         SET_FIELD(p_ramrod->flags,
1852                   RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
1853                   params->remote_atomic);
1854
1855         SET_FIELD(p_ramrod->flags,
1856                   RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
1857                   params->local_write);
1858
1859         SET_FIELD(p_ramrod->flags,
1860                   RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ,
1861                   params->local_read);
1862
1863         SET_FIELD(p_ramrod->flags,
1864                   RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
1865                   params->mw_bind);
1866
1867         SET_FIELD(p_ramrod->flags1,
1868                   RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
1869                   params->pbl_page_size_log - 12);
1870
1871         SET_FIELD(p_ramrod->flags2,
1872                   RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR,
1873                   params->dma_mr);
1874
1875         switch (params->tid_type)
1876         {
1877         case ECORE_RDMA_TID_REGISTERED_MR:
1878                 tid_type = RDMA_TID_REGISTERED_MR;
1879                 break;
1880         case ECORE_RDMA_TID_FMR:
1881                 tid_type = RDMA_TID_FMR;
1882                 break;
1883         case ECORE_RDMA_TID_MW_TYPE1:
1884                 tid_type = RDMA_TID_MW_TYPE1;
1885                 break;
1886         case ECORE_RDMA_TID_MW_TYPE2A:
1887                 tid_type = RDMA_TID_MW_TYPE2A;
1888                 break;
1889         default:
1890                 rc = ECORE_INVAL;
1891                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
1892                 return rc;
1893         }
1894         SET_FIELD(p_ramrod->flags1,
1895                   RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE,
1896                   tid_type);
1897
1898         p_ramrod->itid = OSAL_CPU_TO_LE32(params->itid);
1899         p_ramrod->key = params->key;
1900         p_ramrod->pd = OSAL_CPU_TO_LE16(params->pd);
1901         p_ramrod->length_hi = (u8)(params->length >> 32);
1902         p_ramrod->length_lo = DMA_LO_LE(params->length);
1903         if (params->zbva)
1904         {
1905                 /* Lower 32 bits of the registered MR address.
1906                  * In case of zero based MR, will hold FBO
1907                  */
1908                 p_ramrod->va.hi = 0;
1909                 p_ramrod->va.lo = OSAL_CPU_TO_LE32(params->fbo);
1910         } else {
1911                 DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
1912         }
1913         DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
1914
1915         /* DIF */
1916         if (params->dif_enabled) {
1917                 SET_FIELD(p_ramrod->flags2,
1918                           RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
1919                 DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
1920                                params->dif_error_addr);
1921                 DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
1922         }
1923
1924         rc = ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
1925         if (rc)
1926                 return rc;
1927
1928         if (fw_return_code != RDMA_RETURN_OK) {
1929                 DP_NOTICE(p_hwfn, true, "fw_return_code = %d\n", fw_return_code);
1930                 return ECORE_UNKNOWN_ERROR;
1931         }
1932
1933         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Register TID, rc = %d\n", rc);
1934         return rc;
1935 }
1936
1937 static OSAL_INLINE int ecore_rdma_send_deregister_tid_ramrod(
1938                 struct ecore_hwfn *p_hwfn,
1939                 u32 itid,
1940                 u8 *fw_return_code)
1941 {
1942         struct ecore_sp_init_data              init_data;
1943         struct rdma_deregister_tid_ramrod_data *p_ramrod;
1944         struct ecore_spq_entry                 *p_ent;
1945         enum _ecore_status_t                   rc;
1946
1947         /* Get SPQ entry */
1948         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1949         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1950         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1951
1952         rc = ecore_sp_init_request(p_hwfn, &p_ent,
1953                                    RDMA_RAMROD_DEREGISTER_MR,
1954                                    p_hwfn->p_rdma_info->proto, &init_data);
1955         if (rc != ECORE_SUCCESS) {
1956                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
1957                 return rc;
1958         }
1959
1960         p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
1961         p_ramrod->itid = OSAL_CPU_TO_LE32(itid);
1962
1963         rc = ecore_spq_post(p_hwfn, p_ent, fw_return_code);
1964         if (rc != ECORE_SUCCESS)
1965         {
1966                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
1967                 return rc;
1968         }
1969
1970         return rc;
1971 }
1972
1973 #define ECORE_RDMA_DEREGISTER_TIMEOUT_MSEC      (1)
1974
1975 enum _ecore_status_t ecore_rdma_deregister_tid(void     *rdma_cxt,
1976                                                u32      itid)
1977 {
1978         enum _ecore_status_t                   rc;
1979         u8                                     fw_ret_code;
1980         struct ecore_ptt                       *p_ptt;
1981         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1982
1983         /* First attempt */
1984         rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
1985         if (rc != ECORE_SUCCESS)
1986                 return rc;
1987
1988         if (fw_ret_code != RDMA_RETURN_NIG_DRAIN_REQ)
1989                 goto done;
1990
1991         /* Second attempt, after 1msec, if device still holds data.
1992          * This can occur since 'destroy QP' returns to the caller rather fast.
1993          * The synchronous part of it returns after freeing a few of the
1994          * resources but not all of them, allowing the consumer to continue its
1995          * flow. All of the resources will be freed after the asynchronous part
1996          * of the destroy QP is complete.
1997          */
1998         OSAL_MSLEEP(ECORE_RDMA_DEREGISTER_TIMEOUT_MSEC);
1999         rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
2000         if (rc != ECORE_SUCCESS)
2001                 return rc;
2002
2003         if (fw_ret_code != RDMA_RETURN_NIG_DRAIN_REQ)
2004                 goto done;
2005
2006         /* Third and last attempt, perform NIG drain and resend the ramrod */
2007         p_ptt = ecore_ptt_acquire(p_hwfn);
2008         if (!p_ptt)
2009                 return ECORE_TIMEOUT;
2010
2011         rc = ecore_mcp_drain(p_hwfn, p_ptt);
2012         if (rc != ECORE_SUCCESS) {
2013                 ecore_ptt_release(p_hwfn, p_ptt);
2014                 return rc;
2015         }
2016
2017         ecore_ptt_release(p_hwfn, p_ptt);
2018
2019         rc = ecore_rdma_send_deregister_tid_ramrod(p_hwfn, itid, &fw_ret_code);
2020         if (rc != ECORE_SUCCESS)
2021                 return rc;
2022
2023 done:
2024         if (fw_ret_code == RDMA_RETURN_OK) {
2025                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "De-registered itid=%d\n",
2026                            itid);
2027                 return ECORE_SUCCESS;
2028         } else if (fw_ret_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
2029                 /* INTERNAL: This error is returned when trying to deregister
2030                  * an MR that is not allocated. We define "allocated" as either:
2031                  * 1. Registered.
2032                  * 2. This is an FMR MR type, which is not currently registered
2033                  *    but can accept FMR WQEs on SQ.
2034                  */
2035                 DP_NOTICE(p_hwfn, false, "itid=%d, fw_ret_code=%d\n", itid,
2036                           fw_ret_code);
2037                 return ECORE_INVAL;
2038         } else { /* fw_ret_code == RDMA_RETURN_NIG_DRAIN_REQ */
2039                 DP_NOTICE(p_hwfn, true,
2040                           "deregister failed after three attempts. itid=%d, fw_ret_code=%d\n",
2041                           itid, fw_ret_code);
2042                 return ECORE_UNKNOWN_ERROR;
2043         }
2044 }
2045
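/*
 * SRQ helpers: regular SRQs and XRC SRQs are tracked in separate bitmaps.
 * The firmware id of a regular SRQ is its bitmap id plus srq_id_offset,
 * whereas an XRC SRQ uses its bitmap id unchanged.
 */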
2046 static struct ecore_bmap *ecore_rdma_get_srq_bmap(struct ecore_hwfn *p_hwfn, bool is_xrc)
2047 {
2048         if (is_xrc)
2049                 return &p_hwfn->p_rdma_info->xrc_srq_map;
2050
2051         return &p_hwfn->p_rdma_info->srq_map;
2052 }
2053
2054 u16 ecore_rdma_get_fw_srq_id(struct ecore_hwfn *p_hwfn, u16 id, bool is_xrc)
2055 {
2056         if (is_xrc)
2057                 return id;
2058
2059         return id + p_hwfn->p_rdma_info->srq_id_offset;
2060 }
2061
2062 enum _ecore_status_t
2063 ecore_rdma_modify_srq(void *rdma_cxt,
2064                       struct ecore_rdma_modify_srq_in_params *in_params)
2065 {
2066         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
2067         struct rdma_srq_modify_ramrod_data *p_ramrod;
2068         struct ecore_sp_init_data init_data;
2069         struct ecore_spq_entry *p_ent;
2070         u16 opaque_fid, fw_srq_id;
2071         enum _ecore_status_t rc;
2072
2073         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2074         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2075         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2076         /* Send modify SRQ ramrod */
2077         rc = ecore_sp_init_request(p_hwfn, &p_ent,
2078                                    RDMA_RAMROD_MODIFY_SRQ,
2079                                    p_hwfn->p_rdma_info->proto, &init_data);
2080         if (rc != ECORE_SUCCESS)
2081                 return rc;
2082
2083         p_ramrod = &p_ent->ramrod.rdma_modify_srq;
2084
2085         fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, in_params->srq_id,
2086                                              in_params->is_xrc);
2087         p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
2088         opaque_fid = p_hwfn->hw_info.opaque_fid;
2089         p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);
2090         p_ramrod->wqe_limit = OSAL_CPU_TO_LE16(in_params->wqe_limit);
2091
2092         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
2093         if (rc != ECORE_SUCCESS)
2094                 return rc;
2095
2096         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
2097                    in_params->srq_id, in_params->is_xrc);
2098
2099         return rc;
2100 }
2101
2102 enum _ecore_status_t
2103 ecore_rdma_destroy_srq(void *rdma_cxt,
2104                        struct ecore_rdma_destroy_srq_in_params *in_params)
2105 {
2106         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
2107         struct rdma_srq_destroy_ramrod_data *p_ramrod;
2108         struct ecore_sp_init_data init_data;
2109         struct ecore_spq_entry *p_ent;
2110         u16 opaque_fid, fw_srq_id;
2111         struct ecore_bmap *bmap;
2112         enum _ecore_status_t rc;
2113
2114         opaque_fid = p_hwfn->hw_info.opaque_fid;
2115
2116         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2117         init_data.opaque_fid = opaque_fid;
2118         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2119
2120         /* Send destroy SRQ ramrod */
2121         rc = ecore_sp_init_request(p_hwfn, &p_ent,
2122                                    RDMA_RAMROD_DESTROY_SRQ,
2123                                    p_hwfn->p_rdma_info->proto, &init_data);
2124         if (rc != ECORE_SUCCESS)
2125                 return rc;
2126
2127         p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
2128
2129         fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, in_params->srq_id,
2130                                              in_params->is_xrc);
2131         p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
2132         p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);
2133
2134         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
2135
2136         if (rc != ECORE_SUCCESS)
2137                 return rc;
2138
2139         bmap = ecore_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
2140
2141         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
2142         ecore_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
2143         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
2144
2145         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2146                    "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
2147                    in_params->srq_id, in_params->is_xrc);
2148
2149         return rc;
2150 }
2151
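/*
 * ecore_rdma_create_srq: allocates an SRQ/XRC-SRQ id from the matching
 * bitmap, backs it with a dynamically allocated ILT page and posts a
 * CREATE_SRQ ramrod carrying the PBL base, page size and producer-pair
 * address; for XRC SRQs it also programs the reserved-key enable, the
 * attached CQ cid and the XRC domain. Any failure releases the id back to
 * the bitmap.
 */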
2152 enum _ecore_status_t
2153 ecore_rdma_create_srq(void *rdma_cxt,
2154                       struct ecore_rdma_create_srq_in_params *in_params,
2155                       struct ecore_rdma_create_srq_out_params *out_params)
2156 {
2157         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
2158         struct rdma_srq_create_ramrod_data *p_ramrod;
2159         struct ecore_sp_init_data init_data;
2160         enum ecore_cxt_elem_type elem_type;
2161         struct ecore_spq_entry *p_ent;
2162         u16 opaque_fid, fw_srq_id;
2163         struct ecore_bmap *bmap;
2164         u32 returned_id;
2165         enum _ecore_status_t rc;
2166
2167         /* Allocate XRC/SRQ ID */
2168         bmap = ecore_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
2169         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
2170         rc = ecore_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
2171         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
2172
2173         if (rc != ECORE_SUCCESS) {
2174                 DP_NOTICE(p_hwfn, false,
2175                           "failed to allocate xrc/srq id (is_xrc=%u)\n",
2176                           in_params->is_xrc);
2177                 return rc;
2178         }
2179         /* Allocate XRC/SRQ ILT page */
2180         elem_type = (in_params->is_xrc) ? (ECORE_ELEM_XRC_SRQ) : (ECORE_ELEM_SRQ);
2181         rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
2182         if (rc != ECORE_SUCCESS)
2183                 goto err;
2184
2185         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2186         opaque_fid = p_hwfn->hw_info.opaque_fid;
2187         init_data.opaque_fid = opaque_fid;
2188         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2189
2190         /* Create XRC/SRQ ramrod */
2191         rc = ecore_sp_init_request(p_hwfn, &p_ent,
2192                                    RDMA_RAMROD_CREATE_SRQ,
2193                                    p_hwfn->p_rdma_info->proto, &init_data);
2194         if (rc != ECORE_SUCCESS)
2195                 goto err;
2196
2197         p_ramrod = &p_ent->ramrod.rdma_create_srq;
2198
2199         p_ramrod->pbl_base_addr.hi = DMA_HI_LE(in_params->pbl_base_addr);
2200         p_ramrod->pbl_base_addr.lo = DMA_LO_LE(in_params->pbl_base_addr);
2201         p_ramrod->pages_in_srq_pbl = OSAL_CPU_TO_LE16(in_params->num_pages);
2202         p_ramrod->pd_id = OSAL_CPU_TO_LE16(in_params->pd_id);
2203         p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(opaque_fid);
2204         p_ramrod->page_size = OSAL_CPU_TO_LE16(in_params->page_size);
2205         p_ramrod->producers_addr.hi = DMA_HI_LE(in_params->prod_pair_addr);
2206         p_ramrod->producers_addr.lo = DMA_LO_LE(in_params->prod_pair_addr);
2207         fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, (u16) returned_id,
2208                                              in_params->is_xrc);
2209         p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
2210
2211         if (in_params->is_xrc) {
2212                 SET_FIELD(p_ramrod->flags,
2213                           RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG,
2214                           1);
2215                 SET_FIELD(p_ramrod->flags,
2216                           RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
2217                           in_params->reserved_key_en);
2218                 p_ramrod->xrc_srq_cq_cid = OSAL_CPU_TO_LE32(in_params->cq_cid);
2219                 p_ramrod->xrc_domain = OSAL_CPU_TO_LE16(in_params->xrcd_id);
2220         }
2221
2222         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
2223
2224         if (rc != ECORE_SUCCESS)
2225                 goto err;
2226
2227         out_params->srq_id = (u16)returned_id;
2228
2229         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "XRC/SRQ created Id = %x (is_xrc=%u)\n",
2230                    out_params->srq_id, in_params->is_xrc);
2231         return rc;
2232
2233 err:
2234         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
2235         ecore_bmap_release_id(p_hwfn, bmap, returned_id);
2236         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
2237
2238         return rc;
2239 }
2240
2241 bool ecore_rdma_allocated_qps(struct ecore_hwfn *p_hwfn)
2242 {
2243         bool result;
2244
2245         /* if rdma info has not been allocated, naturally there are no qps */
2246         if (!p_hwfn->p_rdma_info)
2247                 return false;
2248
2249         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
2250         if (!p_hwfn->p_rdma_info->qp_map.bitmap)
2251                 result = false;
2252         else
2253                 result = !ecore_bmap_is_empty(&p_hwfn->p_rdma_info->qp_map);
2254         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
2255         return result;
2256 }
2257
2258 enum _ecore_status_t ecore_rdma_resize_cq(void                  *rdma_cxt,
2259                         struct ecore_rdma_resize_cq_in_params   *in_params,
2260                         struct ecore_rdma_resize_cq_out_params  *out_params)
2261 {
2262         enum _ecore_status_t                    rc;
2263         enum ecore_rdma_toggle_bit              toggle_bit;
2264         struct ecore_spq_entry                  *p_ent;
2265         struct rdma_resize_cq_ramrod_data       *p_ramrod;
2266         u8                                      fw_return_code;
2267         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
2268         dma_addr_t                              ramrod_res_phys;
2269         struct rdma_resize_cq_output_params     *p_ramrod_res;
2270         struct ecore_sp_init_data               init_data;
2271
2272         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", in_params->icid);
2273
2274         /* Send resize CQ ramrod */
2275
2276         p_ramrod_res = (struct rdma_resize_cq_output_params *)
2277                         OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
2278                                 sizeof(*p_ramrod_res));
2279         if (!p_ramrod_res)
2280         {
2281                 rc = ECORE_NOMEM;
2282                 DP_NOTICE(p_hwfn, false,
2283                           "ecore resize cq failed: cannot allocate memory (ramrod). rc = %d\n",
2284                           rc);
2285                 return rc;
2286         }
2287
2288         /* Get SPQ entry */
2289         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2290         init_data.cid = in_params->icid;
2291         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2292         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2293
2294         rc = ecore_sp_init_request(p_hwfn, &p_ent,
2295                                    RDMA_RAMROD_RESIZE_CQ,
2296                                    p_hwfn->p_rdma_info->proto, &init_data);
2297         if (rc != ECORE_SUCCESS)
2298                 goto err;
2299
2300         p_ramrod = &p_ent->ramrod.rdma_resize_cq;
2301
2302         p_ramrod->flags = 0;
2303
2304         /* toggle the bit for every resize or create cq for a given icid */
2305         toggle_bit = ecore_rdma_toggle_bit_create_resize_cq(p_hwfn,
2306                                                             in_params->icid);
2307
2308         SET_FIELD(p_ramrod->flags,
2309                   RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT,
2310                   toggle_bit);
2311
2312         SET_FIELD(p_ramrod->flags,
2313                   RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
2314                   in_params->pbl_two_level);
2315
2316         p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
2317         p_ramrod->pbl_num_pages = OSAL_CPU_TO_LE16(in_params->pbl_num_pages);
2318         p_ramrod->max_cqes = OSAL_CPU_TO_LE32(in_params->cq_size);
2319         p_ramrod->pbl_addr.hi = DMA_HI_LE(in_params->pbl_ptr);
2320         p_ramrod->pbl_addr.lo = DMA_LO_LE(in_params->pbl_ptr);
2321
2322         p_ramrod->output_params_addr.hi = DMA_HI_LE(ramrod_res_phys);
2323         p_ramrod->output_params_addr.lo = DMA_LO_LE(ramrod_res_phys);
2324
2325         rc = ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
2326         if (rc != ECORE_SUCCESS)
2327                 goto err;
2328
2329         if (fw_return_code != RDMA_RETURN_OK)
2330         {
2331                 DP_NOTICE(p_hwfn, fw_return_code != RDMA_RETURN_RESIZE_CQ_ERR,
2332                           "fw_return_code = %d\n", fw_return_code);
2335                 rc = ECORE_UNKNOWN_ERROR;
2336                 goto err;
2337         }
2338
2339         out_params->prod = OSAL_LE32_TO_CPU(p_ramrod_res->old_cq_prod);
2340         out_params->cons = OSAL_LE32_TO_CPU(p_ramrod_res->old_cq_cons);
2341
2342         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
2343                                sizeof(*p_ramrod_res));
2344
2345         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
2346
2347         return rc;
2348
2349 err:
2350         OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
2351                                sizeof(*p_ramrod_res));
2352         DP_NOTICE(p_hwfn, false, "rc = %d\n", rc);
2353
2354         return rc;
2355 }
2356
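/*
 * ecore_rdma_start: acquires a PTT, allocates the per-function RDMA info,
 * runs the setup sequence and activates RDMA. Each error label unwinds only
 * what was completed before the failure.
 */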
2357 enum _ecore_status_t ecore_rdma_start(void *rdma_cxt,
2358                                 struct ecore_rdma_start_in_params *params)
2359 {
2360         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
2361         struct ecore_ptt *p_ptt;
2362         enum _ecore_status_t rc = ECORE_TIMEOUT;
2363
2364         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2365                    "desired_cnq = %08x\n", params->desired_cnq);
2366
2367         p_ptt = ecore_ptt_acquire(p_hwfn);
2368         if (!p_ptt)
2369                 goto err;
2370
2371         rc = ecore_rdma_alloc(p_hwfn);
2372         if (rc)
2373                 goto err1;
2374
2375         rc = ecore_rdma_setup(p_hwfn, p_ptt, params);
2376         if (rc)
2377                 goto err2;
2378
2379         ecore_ptt_release(p_hwfn, p_ptt);
2380
2381         ecore_rdma_activate(p_hwfn);
2382         return rc;
2383
2384 err2:
2385         ecore_rdma_free(p_hwfn);
2386 err1:
2387         ecore_ptt_release(p_hwfn, p_ptt);
2388 err:
2389         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
2390         return rc;
2391 }
2392
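/*
 * ecore_rdma_query_stats: the counters live in storm RAM (PSTORM/TSTORM per
 * statistics queue, plus per-PF RoCE event, DCQCN and iWARP areas) and are
 * copied out with ecore_memcpy_from after the local mirrors are zeroed. The
 * sequence is bracketed by the RDMA reference count, presumably so a
 * concurrent teardown cannot free p_rdma_info while it is being read.
 */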
2393 enum _ecore_status_t ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
2394                                 struct ecore_rdma_stats_out_params *out_params)
2395 {
2396         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
2397         u8 abs_stats_queue, max_stats_queues;
2398         u32 pstats_addr, tstats_addr, addr;
2399         struct ecore_rdma_info *info;
2400         struct ecore_ptt *p_ptt;
2401 #ifdef CONFIG_ECORE_IWARP
2402         u32 xstats_addr;
2403 #endif
2404         enum _ecore_status_t rc = ECORE_SUCCESS;
2405
2406         if (!p_hwfn)
2407                 return ECORE_INVAL;
2408
2409         if (!p_hwfn->p_rdma_info) {
2410                 DP_INFO(p_hwfn->p_dev, "ecore rdma query stats failed due to NULL rdma_info\n");
2411                 return ECORE_INVAL;
2412         }
2413
2414         info = p_hwfn->p_rdma_info;
2415
2416         rc = ecore_rdma_inc_ref_cnt(p_hwfn);
2417         if (rc != ECORE_SUCCESS)
2418                 return rc;
2419
2420         max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
2421         if (stats_queue >= max_stats_queues) {
2422                 DP_ERR(p_hwfn->p_dev,
2423                        "ecore rdma query stats failed due to invalid statistics queue %d. maximum is %d\n",
2424                        stats_queue, max_stats_queues);
2425                 rc = ECORE_INVAL;
2426                 goto err;
2427         }
2428
2429         /* Statistics collected in statistics queues (for PF/VF) */
2430         abs_stats_queue = RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
2431                             stats_queue;
2432         pstats_addr = BAR0_MAP_REG_PSDM_RAM +
2433                       PSTORM_RDMA_QUEUE_STAT_OFFSET(abs_stats_queue);
2434         tstats_addr = BAR0_MAP_REG_TSDM_RAM +
2435                       TSTORM_RDMA_QUEUE_STAT_OFFSET(abs_stats_queue);
2436
2437 #ifdef CONFIG_ECORE_IWARP
2438         /* Statistics per PF ID */
2439         xstats_addr = BAR0_MAP_REG_XSDM_RAM +
2440                       XSTORM_IWARP_RXMIT_STATS_OFFSET(p_hwfn->rel_pf_id);
2441 #endif
2442
2443         OSAL_MEMSET(&info->rdma_sent_pstats, 0, sizeof(info->rdma_sent_pstats));
2444         OSAL_MEMSET(&info->rdma_rcv_tstats, 0, sizeof(info->rdma_rcv_tstats));
2445         OSAL_MEMSET(&info->roce.event_stats, 0, sizeof(info->roce.event_stats));
2446         OSAL_MEMSET(&info->roce.dcqcn_rx_stats, 0, sizeof(info->roce.dcqcn_rx_stats));
2447         OSAL_MEMSET(&info->roce.dcqcn_tx_stats, 0, sizeof(info->roce.dcqcn_tx_stats));
2448 #ifdef CONFIG_ECORE_IWARP
2449         OSAL_MEMSET(&info->iwarp.stats, 0, sizeof(info->iwarp.stats));
2450 #endif
2451
2452         p_ptt = ecore_ptt_acquire(p_hwfn);
2453
2454         if (!p_ptt) {
2455                 rc = ECORE_TIMEOUT;
2456                 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);
2457                 goto err;
2458         }
2459
2460         ecore_memcpy_from(p_hwfn, p_ptt, &info->rdma_sent_pstats,
2461                           pstats_addr, sizeof(struct rdma_sent_stats));
2462
2463         ecore_memcpy_from(p_hwfn, p_ptt, &info->rdma_rcv_tstats,
2464                           tstats_addr, sizeof(struct rdma_rcv_stats));
2465
2466         addr = BAR0_MAP_REG_TSDM_RAM +
2467                TSTORM_ROCE_EVENTS_STAT_OFFSET(p_hwfn->rel_pf_id);
2468         ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.event_stats, addr,
2469                           sizeof(struct roce_events_stats));
2470
2471         addr = BAR0_MAP_REG_YSDM_RAM +
2472                 YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(p_hwfn->rel_pf_id);
2473         ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.dcqcn_rx_stats, addr,
2474                           sizeof(struct roce_dcqcn_received_stats));
2475
2476         addr = BAR0_MAP_REG_PSDM_RAM +
2477                PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(p_hwfn->rel_pf_id);
2478         ecore_memcpy_from(p_hwfn, p_ptt, &info->roce.dcqcn_tx_stats, addr,
2479                           sizeof(struct roce_dcqcn_sent_stats));
2480
2481 #ifdef CONFIG_ECORE_IWARP
2482         ecore_memcpy_from(p_hwfn, p_ptt, &info->iwarp.stats,
2483                           xstats_addr, sizeof(struct iwarp_rxmit_stats_drv));
2484 #endif
2485
2486         ecore_ptt_release(p_hwfn, p_ptt);
2487
2488         OSAL_MEMSET(out_params, 0, sizeof(*out_params));
2489
2490         out_params->sent_bytes =
2491                 HILO_64_REGPAIR(info->rdma_sent_pstats.sent_bytes);
2492         out_params->sent_pkts =
2493                 HILO_64_REGPAIR(info->rdma_sent_pstats.sent_pkts);
2494         out_params->rcv_bytes =
2495                 HILO_64_REGPAIR(info->rdma_rcv_tstats.rcv_bytes);
2496         out_params->rcv_pkts =
2497                 HILO_64_REGPAIR(info->rdma_rcv_tstats.rcv_pkts);
2498
2499         out_params->silent_drops =
2500                 OSAL_LE16_TO_CPU(info->roce.event_stats.silent_drops);
2501         out_params->rnr_nacks_sent =
2502                 OSAL_LE16_TO_CPU(info->roce.event_stats.rnr_naks_sent);
2503         out_params->icrc_errors =
2504                 OSAL_LE32_TO_CPU(info->roce.event_stats.icrc_error_count);
2505         out_params->retransmit_events =
2506                 OSAL_LE32_TO_CPU(info->roce.event_stats.retransmit_count);
2507         out_params->ecn_pkt_rcv =
2508                 HILO_64_REGPAIR(info->roce.dcqcn_rx_stats.ecn_pkt_rcv);
2509         out_params->cnp_pkt_rcv =
2510                 HILO_64_REGPAIR(info->roce.dcqcn_rx_stats.cnp_pkt_rcv);
2511         out_params->cnp_pkt_sent =
2512                 HILO_64_REGPAIR(info->roce.dcqcn_tx_stats.cnp_pkt_sent);
2513
2514 #ifdef CONFIG_ECORE_IWARP
2515         out_params->iwarp_tx_fast_rxmit_cnt =
2516                 HILO_64_REGPAIR(info->iwarp.stats.tx_fast_retransmit_event_cnt);
2517         out_params->iwarp_tx_slow_start_cnt =
2518                 HILO_64_REGPAIR(
2519                         info->iwarp.stats.tx_go_to_slow_start_event_cnt);
2520         out_params->unalign_rx_comp = info->iwarp.unalign_rx_comp;
2521 #endif
2522
2523 err:
2524         ecore_rdma_dec_ref_cnt(p_hwfn);
2525
2526         return rc;
2527 }
2528
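/*
 * ecore_rdma_query_counters: resource usage is simply the population count
 * (OSAL_BITMAP_WEIGHT) of each allocation bitmap, reported together with the
 * bitmap's max_count as the corresponding maximum.
 */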
2529 enum _ecore_status_t
2530 ecore_rdma_query_counters(void *rdma_cxt,
2531                           struct ecore_rdma_counters_out_params *out_params)
2532 {
2533         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
2534         unsigned long *bitmap;
2535         unsigned int nbits;
2536
2537         if (!p_hwfn->p_rdma_info)
2538                 return ECORE_INVAL;
2539
2540         OSAL_MEMSET(out_params, 0, sizeof(*out_params));
2541
2542         bitmap = p_hwfn->p_rdma_info->pd_map.bitmap;
2543         nbits = p_hwfn->p_rdma_info->pd_map.max_count;
2544         out_params->pd_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
2545         out_params->max_pd = nbits;
2546
2547         bitmap = p_hwfn->p_rdma_info->dpi_map.bitmap;
2548         nbits = p_hwfn->p_rdma_info->dpi_map.max_count;
2549         out_params->dpi_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
2550         out_params->max_dpi = nbits;
2551
2552         bitmap = p_hwfn->p_rdma_info->cq_map.bitmap;
2553         nbits = p_hwfn->p_rdma_info->cq_map.max_count;
2554         out_params->cq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
2555         out_params->max_cq = nbits;
2556
2557         bitmap = p_hwfn->p_rdma_info->qp_map.bitmap;
2558         nbits = p_hwfn->p_rdma_info->qp_map.max_count;
2559         out_params->qp_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
2560         out_params->max_qp = nbits;
2561
2562         bitmap = p_hwfn->p_rdma_info->tid_map.bitmap;
2563         nbits = p_hwfn->p_rdma_info->tid_map.max_count;
2564         out_params->tid_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
2565         out_params->max_tid = nbits;
2566
2567         bitmap = p_hwfn->p_rdma_info->srq_map.bitmap;
2568         nbits = p_hwfn->p_rdma_info->srq_map.max_count;
2569         out_params->srq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
2570         out_params->max_srq = nbits;
2571
2572         bitmap = p_hwfn->p_rdma_info->xrc_srq_map.bitmap;
2573         nbits = p_hwfn->p_rdma_info->xrc_srq_map.max_count;
2574         out_params->xrc_srq_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
2575         out_params->max_xrc_srq = nbits;
2576
2577         bitmap = p_hwfn->p_rdma_info->xrcd_map.bitmap;
2578         nbits = p_hwfn->p_rdma_info->xrcd_map.max_count;
2579         out_params->xrcd_count = OSAL_BITMAP_WEIGHT(bitmap, nbits);
2580         out_params->max_xrcd = nbits;
2581
2582         return ECORE_SUCCESS;
2583 }
2584
2585 enum _ecore_status_t ecore_rdma_resize_cnq(void                       *rdma_cxt,
2586                                 struct ecore_rdma_resize_cnq_in_params *params)
2587 {
2588         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
2589
2590         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "cnq_id = %08x\n", params->cnq_id);
2591
2592         /* @@@TBD: waiting for fw (there is no ramrod yet) */
2593         return ECORE_NOTIMPL;
2594 }
2595
2596 void ecore_rdma_remove_user(void        *rdma_cxt,
2597                             u16         dpi)
2598 {
2599         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
2600
2601         DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "dpi = %08x\n", dpi);
2602
2603         OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
2604         ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
2605         OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
2606 }
2607
2608 #ifndef LINUX_REMOVE
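/*
 * Global RDMA overrides: ecore_rdma_set_glob_cfg range-checks every field
 * selected in glob_cfg_bits and stores it in p_rdma_info->glob_cfg. The
 * enabled overrides (DSCP, ECN, VLAN priority) are applied per QP by
 * ecore_rdma_modify_qp when the address vector is modified.
 */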
2609 enum _ecore_status_t
2610 ecore_rdma_set_glob_cfg(struct ecore_hwfn *p_hwfn,
2611                         struct ecore_rdma_glob_cfg *in_params,
2612                         u32 glob_cfg_bits)
2613 {
2614         struct ecore_rdma_glob_cfg glob_cfg;
2615         enum _ecore_status_t rc = ECORE_SUCCESS;
2616
2617         DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_RDMA,
2618                    "dscp %d dscp en %d ecn %d ecn en %d vlan pri %d vlan_pri_en %d\n",
2619                    in_params->dscp, in_params->dscp_en,
2620                    in_params->ecn, in_params->ecn_en, in_params->vlan_pri,
2621                    in_params->vlan_pri_en);
2622
2623         /* Read global cfg to local */
2624         OSAL_MEMCPY(&glob_cfg, &p_hwfn->p_rdma_info->glob_cfg,
2625                     sizeof(glob_cfg));
2626
2627         if (glob_cfg_bits & ECORE_RDMA_DCSP_BIT_MASK) {
2628                 if (in_params->dscp > MAX_DSCP) {
2629                         DP_ERR(p_hwfn->p_dev, "invalid glob dscp %d\n",
2630                                in_params->dscp);
2631                         return ECORE_INVAL;
2632                 }
2633                 glob_cfg.dscp = in_params->dscp;
2634         }
2635
2636         if (glob_cfg_bits & ECORE_RDMA_DCSP_EN_BIT_MASK) {
2637                 if (in_params->dscp_en > 1) {
2638                         DP_ERR(p_hwfn->p_dev, "invalid glob_dscp_en %d\n",
2639                                in_params->dscp_en);
2640                         return ECORE_INVAL;
2641                 }
2642                 glob_cfg.dscp_en = in_params->dscp_en;
2643         }
2644
2645         if (glob_cfg_bits & ECORE_RDMA_ECN_BIT_MASK) {
2646                 if (in_params->ecn > INET_ECN_ECT_0) {
2647                         DP_ERR(p_hwfn->p_dev, "invalid glob ecn %d\n",
2648                                in_params->ecn);
2649                         return ECORE_INVAL;
2650                 }
2651                 glob_cfg.ecn = in_params->ecn;
2652         }
2653
2654         if (glob_cfg_bits & ECORE_RDMA_ECN_EN_BIT_MASK) {
2655                 if (in_params->ecn_en > 1) {
2656                         DP_ERR(p_hwfn->p_dev, "invalid glob ecn en %d\n",
2657                                in_params->ecn_en);
2658                         return ECORE_INVAL;
2659                 }
2660                 glob_cfg.ecn_en = in_params->ecn_en;
2661         }
2662
2663         if (glob_cfg_bits & ECORE_RDMA_VLAN_PRIO_BIT_MASK) {
2664                 if (in_params->vlan_pri > MAX_VLAN_PRIO) {
2665                         DP_ERR(p_hwfn->p_dev, "invalid glob vlan pri %d\n",
2666                                in_params->vlan_pri);
2667                         return ECORE_INVAL;
2668                 }
2669                 glob_cfg.vlan_pri = in_params->vlan_pri;
2670         }
2671
2672         if (glob_cfg_bits & ECORE_RDMA_VLAN_PRIO_EN_BIT_MASK) {
2673                 if (in_params->vlan_pri_en > 1) {
2674                         DP_ERR(p_hwfn->p_dev, "invalid glob vlan pri en %d\n",
2675                                in_params->vlan_pri_en);
2676                         return ECORE_INVAL;
2677                 }
2678                 glob_cfg.vlan_pri_en = in_params->vlan_pri_en;
2679         }
2680
2681         /* Write back local cfg to global */
2682         OSAL_MEMCPY(&p_hwfn->p_rdma_info->glob_cfg, &glob_cfg,
2683                     sizeof(glob_cfg));
2684
2685         return rc;
2686 }
2687
2688 enum _ecore_status_t
2689 ecore_rdma_get_glob_cfg(struct ecore_hwfn *p_hwfn,
2690                         struct ecore_rdma_glob_cfg *out_params)
2691 {
2692         OSAL_MEMCPY(out_params, &p_hwfn->p_rdma_info->glob_cfg,
2693                     sizeof(struct ecore_rdma_glob_cfg));
2694
2695         return ECORE_SUCCESS;
2696 }
2697 #endif /* LINUX_REMOVE */