1 /*
2  * Copyright (c) 2018-2019 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27
28
29 /*
30  * File: qlnxr_verbs.c
31  */
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include "qlnxr_def.h"
36 #include "rdma_common.h"
37 #include "qlnxr_roce.h"
38 #include "qlnxr_cm.h"
39
40 #define upper_32_bits(x) (uint32_t)((x) >> 32)
41 #define lower_32_bits(x) (uint32_t)(x)
42 #define HILO_U64(hi, lo)                ((((u64)(hi)) << 32) + (lo))
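/*
 * Illustrative only: these helpers split a 64-bit DMA address into the
 * 32-bit halves that the firmware structures expect, and HILO_U64()
 * reassembles them, e.g.
 *
 *      u64 dma = 0x0000000123456789ULL;
 *      upper_32_bits(dma) == 0x00000001, lower_32_bits(dma) == 0x23456789,
 *      HILO_U64(0x00000001, 0x23456789) == dma
 */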
43
44 #define TYPEPTR_ADDR_SET(type_ptr, field, vaddr)                        \
45         do {                                                            \
46                 (type_ptr)->field.hi = cpu_to_le32(upper_32_bits(vaddr));\
47                 (type_ptr)->field.lo = cpu_to_le32(lower_32_bits(vaddr));\
48         } while (0)
49
50
51 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)                 \
52         do {                                                    \
53                 TYPEPTR_ADDR_SET(sge, addr, vaddr);             \
54                 (sge)->length = cpu_to_le32(vlength);           \
55                 (sge)->flags = cpu_to_le32(vflags);             \
56         } while (0)
57
58 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)                       \
59         do {                                                    \
60                 TYPEPTR_ADDR_SET(hdr, wr_id, vwr_id);           \
61                 (hdr)->num_sges = num_sge;                      \
62         } while (0)
63
64 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)                 \
65         do {                                                    \
66                 TYPEPTR_ADDR_SET(sge, addr, vaddr);             \
67                 (sge)->length = cpu_to_le32(vlength);           \
68                 (sge)->l_key = cpu_to_le32(vlkey);              \
69         } while (0)
70
71 #define NIPQUAD(addr) \
72         ((unsigned char *)&addr)[0], \
73         ((unsigned char *)&addr)[1], \
74         ((unsigned char *)&addr)[2], \
75         ((unsigned char *)&addr)[3]
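/*
 * NIPQUAD() expands to the four bytes of an IPv4 address as stored in
 * memory, intended for "%d.%d.%d.%d" style debug prints. A sketch of the
 * intended usage (hypothetical variable name):
 *
 *      QL_DPRINT12(ha, "peer ip = %d.%d.%d.%d\n", NIPQUAD(ip4_addr));
 */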
76
77 struct ib_srq *qlnxr_create_srq(struct ib_pd *,
78                 struct ib_srq_init_attr *,
79                 struct ib_udata *);
80
81 int qlnxr_destroy_srq(struct ib_srq *);
82
83 int qlnxr_modify_srq(struct ib_srq *,
84                 struct ib_srq_attr *,
85                 enum ib_srq_attr_mask,
86                 struct ib_udata *);
87 static int
88 qlnxr_check_srq_params(struct ib_pd *ibpd,
89         struct qlnxr_dev *dev,
90         struct ib_srq_init_attr *attrs);
91
92 static int
93 qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
94         struct qlnxr_srq *srq,
95         struct qlnxr_create_srq_ureq *ureq,
96         int access, int dmasync);
97
98 static int
99 qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
100         struct qlnxr_dev *dev,
101         struct ib_srq_init_attr *init_attr);
102
103 extern enum _ecore_status_t
104 ecore_rdma_modify_srq(void *rdma_cxt,
105         struct ecore_rdma_modify_srq_in_params *in_params);
106
107 extern enum _ecore_status_t
108 ecore_rdma_destroy_srq(void *rdma_cxt,
109         struct ecore_rdma_destroy_srq_in_params *in_params);
110
111 extern enum _ecore_status_t
112 ecore_rdma_create_srq(void *rdma_cxt,
113         struct ecore_rdma_create_srq_in_params *in_params,
114         struct ecore_rdma_create_srq_out_params *out_params);
115
116
117 static int
118 qlnxr_copy_srq_uresp(struct qlnxr_dev *dev,
119         struct qlnxr_srq *srq,
120         struct ib_udata *udata);
121
122 static void
123 qlnxr_free_srq_user_params(struct qlnxr_srq *srq);
124
125 static void
126 qlnxr_free_srq_kernel_params(struct qlnxr_srq *srq);
127
128
129 static u32
130 qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq);
131
132 int
133 qlnxr_iw_query_gid(struct ib_device *ibdev, u8 port, int index,
134         union ib_gid *sgid)
135 {
136         struct qlnxr_dev        *dev;
137         qlnx_host_t             *ha;
138
139         dev = get_qlnxr_dev(ibdev);
140         ha = dev->ha;
141
142         QL_DPRINT12(ha, "enter\n");
143
144         memset(sgid->raw, 0, sizeof(sgid->raw));
145
146         memcpy(sgid->raw, dev->ha->primary_mac, sizeof (dev->ha->primary_mac));
147
148         QL_DPRINT12(ha, "exit\n");
149
150         return 0;
151 }
152
153 int
154 qlnxr_query_gid(struct ib_device *ibdev, u8 port, int index,
155         union ib_gid *sgid)
156 {
157         struct qlnxr_dev        *dev;
158         qlnx_host_t             *ha;
159
160         dev = get_qlnxr_dev(ibdev);
161         ha = dev->ha;
162         QL_DPRINT12(ha, "enter index: %d\n", index);
163 #if 0
164         int ret = 0;
165         /* @@@: if DEFINE_ROCE_GID_TABLE to be used here */
166         //if (!rdma_cap_roce_gid_table(ibdev, port)) {
167         if (!(rdma_protocol_roce(ibdev, port) &&
168                 ibdev->add_gid && ibdev->del_gid)) {
169                 QL_DPRINT11(ha, "acquire gid failed\n");
170                 return -ENODEV;
171         }
172
173         ret = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
174         if (ret == -EAGAIN) {
175                 memcpy(sgid, &zgid, sizeof(*sgid));
176                 return 0;
177         }
178 #endif
179         if ((index >= QLNXR_MAX_SGID) || (index < 0)) {
180                 QL_DPRINT12(ha, "invalid gid index %d\n", index);
181                 memset(sgid, 0, sizeof(*sgid));
182                 return -EINVAL;
183         }
184         memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
185
186         QL_DPRINT12(ha, "exit : %p\n", sgid);
187
188         return 0;
189 }
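/*
 * Note: qlnxr_iw_query_gid() above builds the GID directly from the port's
 * primary MAC (a single MAC-derived entry for iWARP), while qlnxr_query_gid()
 * returns an entry from the driver-maintained sgid_tbl used for RoCE.
 */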
190
191 struct ib_srq *
192 qlnxr_create_srq(struct ib_pd *ibpd, struct ib_srq_init_attr *init_attr,
193         struct ib_udata *udata)
194 {
195         struct qlnxr_dev        *dev;
196         qlnx_host_t             *ha;
197         struct ecore_rdma_destroy_srq_in_params destroy_in_params;
198         struct ecore_rdma_create_srq_out_params out_params;
199         struct ecore_rdma_create_srq_in_params in_params;
200         u64 pbl_base_addr, phy_prod_pair_addr;
201         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
202         struct ib_ucontext *ib_ctx = NULL;
203         struct qlnxr_srq_hwq_info *hw_srq;
204         struct qlnxr_ucontext *ctx = NULL;
205         struct qlnxr_create_srq_ureq ureq;
206         u32 page_cnt, page_size;
207         struct qlnxr_srq *srq;
208         int ret = 0;
209
210         dev = get_qlnxr_dev((ibpd->device));
211         ha = dev->ha;
212
213         QL_DPRINT12(ha, "enter\n");
214
215         ret = qlnxr_check_srq_params(ibpd, dev, init_attr);
            if (ret)
                    return ERR_PTR(ret);
216
217         srq = kzalloc(sizeof(*srq), GFP_KERNEL);
218         if (!srq) {
219                 QL_DPRINT11(ha, "cannot allocate memory for srq\n");
220                 return ERR_PTR(-ENOMEM); /* callers expect an ERR_PTR(), not NULL */
221         }
222
223         srq->dev = dev;
224         hw_srq = &srq->hw_srq;
225         spin_lock_init(&srq->lock);
226         memset(&in_params, 0, sizeof(in_params));
227
228         if (udata && ibpd->uobject && ibpd->uobject->context) {
229                 ib_ctx = ibpd->uobject->context;
230                 ctx = get_qlnxr_ucontext(ib_ctx);
231
232                 memset(&ureq, 0, sizeof(ureq));
233                 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
234                         udata->inlen))) {
235                         QL_DPRINT11(ha, "problem"
236                                 " copying data from user space\n");
237                         goto err0;
238                 }
239
240                 ret = qlnxr_init_srq_user_params(ib_ctx, srq, &ureq, 0, 0);
241                 if (ret)
242                         goto err0;
243
244                 page_cnt = srq->usrq.pbl_info.num_pbes;
245                 pbl_base_addr = srq->usrq.pbl_tbl->pa;
246                 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
247                 // @@@ : if DEFINE_IB_UMEM_PAGE_SHIFT
248                 // page_size = BIT(srq->usrq.umem->page_shift);
249                 // else
250                 page_size = srq->usrq.umem->page_size;
251         } else {
252                 struct ecore_chain *pbl;
253                 ret = qlnxr_alloc_srq_kernel_params(srq, dev, init_attr);
254                 if (ret)
255                         goto err0;
256                 pbl = &hw_srq->pbl;
257
258                 page_cnt = ecore_chain_get_page_cnt(pbl);
259                 pbl_base_addr = ecore_chain_get_pbl_phys(pbl);
260                 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
261                 page_size = pbl->elem_per_page << 4;
262         }
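        /*
         * At this point both the user and kernel paths have produced the
         * same four pieces of information for the firmware: the PBL base
         * address, the number of PBL pages, the WQE page size and the
         * producer-pair address. The "elem_per_page << 4" above appears to
         * assume 16-byte SRQ WQE elements when converting elements per page
         * into a byte size.
         */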
263
264         in_params.pd_id = pd->pd_id;
265         in_params.pbl_base_addr = pbl_base_addr;
266         in_params.prod_pair_addr = phy_prod_pair_addr;
267         in_params.num_pages = page_cnt;
268         in_params.page_size = page_size;
269
270         ret = ecore_rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
271         if (ret)
272                 goto err1;
273
274         srq->srq_id = out_params.srq_id;
275
276         if (udata) {
277                 ret = qlnxr_copy_srq_uresp(dev, srq, udata);
278                 if (ret)
279                         goto err2;
280         }
281
282         QL_DPRINT12(ha, "created srq with srq_id = 0x%0x\n", srq->srq_id);
283         return &srq->ibsrq;
284 err2:
285         memset(&destroy_in_params, 0, sizeof(destroy_in_params));
286         destroy_in_params.srq_id = srq->srq_id;
287         ecore_rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
288
289 err1:
290         if (udata)
291                 qlnxr_free_srq_user_params(srq);
292         else
293                 qlnxr_free_srq_kernel_params(srq);
294
295 err0:
296         kfree(srq);     
297         return ERR_PTR(-EFAULT);
298 }
299
300 int
301 qlnxr_destroy_srq(struct ib_srq *ibsrq)
302 {
303         struct qlnxr_dev        *dev;
304         struct qlnxr_srq        *srq;
305         qlnx_host_t             *ha;
306         struct ecore_rdma_destroy_srq_in_params in_params;
307
308         srq = get_qlnxr_srq(ibsrq);
309         dev = srq->dev;
310         ha = dev->ha;
311
312         memset(&in_params, 0, sizeof(in_params));
313         in_params.srq_id = srq->srq_id;
314
315         ecore_rdma_destroy_srq(dev->rdma_ctx, &in_params);
316
317         if (ibsrq->pd->uobject && ibsrq->pd->uobject->context)
318                 qlnxr_free_srq_user_params(srq);
319         else
320                 qlnxr_free_srq_kernel_params(srq);
321
322         QL_DPRINT12(ha, "destroyed srq_id=0x%0x\n", srq->srq_id);
323         kfree(srq);
324         return 0;
325 }
326
327 int
328 qlnxr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
329         enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
330 {
331         struct qlnxr_dev        *dev;
332         struct qlnxr_srq        *srq;
333         qlnx_host_t             *ha;
334         struct ecore_rdma_modify_srq_in_params in_params;
335         int ret = 0;
336
337         srq = get_qlnxr_srq(ibsrq);
338         dev = srq->dev;
339         ha = dev->ha;
340
341         QL_DPRINT12(ha, "enter\n");
342         if (attr_mask & IB_SRQ_MAX_WR) {
343                 QL_DPRINT12(ha, "SRQ resize (IB_SRQ_MAX_WR) not supported;"
344                         " attribute mask=0x%x specified for %p\n", attr_mask, srq);
345                 return -EINVAL;
346         }
347
348         if (attr_mask & IB_SRQ_LIMIT) {
349                 if (attr->srq_limit >= srq->hw_srq.max_wr) {
350                         QL_DPRINT12(ha, "invalid srq_limit=0x%x"
351                                 " (max_srq_limit = 0x%x)\n",
352                                attr->srq_limit, srq->hw_srq.max_wr);
353                         return -EINVAL; 
354                 }
355                 memset(&in_params, 0, sizeof(in_params));
356                 in_params.srq_id = srq->srq_id;
357                 in_params.wqe_limit = attr->srq_limit;
358                 ret = ecore_rdma_modify_srq(dev->rdma_ctx, &in_params);
359                 if (ret)
360                         return ret;
361         }
362
363         QL_DPRINT12(ha, "modified srq with srq_id = 0x%0x\n", srq->srq_id);
364         return 0;
365 }
366
367 int
368 qlnxr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
369 {
370         struct qlnxr_dev        *dev;
371         struct qlnxr_srq        *srq;
372         qlnx_host_t             *ha;
373         struct ecore_rdma_device *qattr;
374         srq = get_qlnxr_srq(ibsrq);
375         dev = srq->dev;
376         ha = dev->ha;
377         //qattr = &dev->attr;
378         qattr = ecore_rdma_query_device(dev->rdma_ctx);
379         QL_DPRINT12(ha, "enter\n");
380
381         if (!dev->rdma_ctx) {
382                 QL_DPRINT12(ha, "called with invalid params"
383                         " rdma_ctx is NULL\n");
384                 return -EINVAL;
385         }
386
387         srq_attr->srq_limit = qattr->max_srq;
388         srq_attr->max_wr = qattr->max_srq_wr;
389         srq_attr->max_sge = qattr->max_sge;
390
391         QL_DPRINT12(ha, "exit\n");
392         return 0;
393 }
394
395 /* Increment srq wr producer by one */
396 static
397 void qlnxr_inc_srq_wr_prod (struct qlnxr_srq_hwq_info *info)
398 {
399         info->wr_prod_cnt++;
400 }
401
402 /* Increment srq wr consumer by one */
403 static 
404 void qlnxr_inc_srq_wr_cons(struct qlnxr_srq_hwq_info *info)
405 {
406         info->wr_cons_cnt++;
407 }
408
409 /* get_port_immutable verb is not available in FreeBSD */
410 #if 0
411 int
412 qlnxr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
413         struct ib_port_immutable *immutable)
414 {
415         struct qlnxr_dev                *dev;
416         qlnx_host_t                     *ha;
417         dev = get_qlnxr_dev(ibdev);
418         ha = dev->ha;
419
420         QL_DPRINT12(ha, "entered but not implemented!!!\n");
421 }
422 #endif
423
424 int
425 qlnxr_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
426         struct ib_recv_wr **bad_wr)
427 {
428         struct qlnxr_dev        *dev;
429         struct qlnxr_srq        *srq;
430         qlnx_host_t             *ha;
431         struct qlnxr_srq_hwq_info *hw_srq;
432         struct ecore_chain *pbl;
433         unsigned long flags;
434         int status = 0;
435         u32 num_sge, offset;
436
437         srq = get_qlnxr_srq(ibsrq);
438         dev = srq->dev;
439         ha = dev->ha;
440         hw_srq = &srq->hw_srq;
441
442         QL_DPRINT12(ha, "enter\n");
443         spin_lock_irqsave(&srq->lock, flags);
444
445         pbl = &srq->hw_srq.pbl;
446         while (wr) {
447                 struct rdma_srq_wqe_header *hdr;
448                 int i;
449
450                 if (!qlnxr_srq_elem_left(hw_srq) ||
451                     wr->num_sge > srq->hw_srq.max_sges) {
452                         QL_DPRINT11(ha, "WR cannot be posted"
453                             " (%d, %d) || (%d > %d)\n",
454                             hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
455                             wr->num_sge, srq->hw_srq.max_sges);
456                         status = -ENOMEM;
457                         *bad_wr = wr;
458                         break;
459                 }
460
461                 hdr = ecore_chain_produce(pbl);
462                 num_sge = wr->num_sge;
463                 /* Set number of sge and WR id in header */
464                 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
465
466                 /* The chain (PBL) is maintained at WR granularity,
467                  * so advance the WR producer once per posted WR.
468                  */
469                 qlnxr_inc_srq_wr_prod(hw_srq);
470                 hw_srq->wqe_prod++;
471                 hw_srq->sge_prod++;
472
473                 QL_DPRINT12(ha, "SRQ WR : SGEs: %d with wr_id[%d] = %llx\n",
474                         wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
475
476                 for (i = 0; i < wr->num_sge; i++) {
477                         struct rdma_srq_sge *srq_sge = 
478                             ecore_chain_produce(pbl);
479                         /* Set SGE length, lkey and address */
480                         SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
481                                 wr->sg_list[i].length, wr->sg_list[i].lkey);
482
483                         QL_DPRINT12(ha, "[%d]: len %d, key %x, addr %x:%x\n",
484                                 i, srq_sge->length, srq_sge->l_key,
485                                 srq_sge->addr.hi, srq_sge->addr.lo);
486                         hw_srq->sge_prod++;
487                 }
488                 wmb();
489                 /*
490                  * The SRQ producer pair is 8 bytes: the SGE producer sits
491                  * in the first 4 bytes and the WQE producer in the next
492                  * 4 bytes, hence the offsetof() based store below.
493                  */
494                 *(srq->hw_srq.virt_prod_pair_addr) = hw_srq->sge_prod;
495                 offset = offsetof(struct rdma_srq_producers, wqe_prod);
496                 *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
497                         hw_srq->wqe_prod;
498                 /* Flush prod after updating it */
499                 wmb();
500                 wr = wr->next;
501         }       
502
503         QL_DPRINT12(ha, "Elements in SRQ: %d\n",
504                 ecore_chain_get_elem_left(pbl));
505
506         spin_unlock_irqrestore(&srq->lock, flags);      
507         QL_DPRINT12(ha, "exit\n");
508         return status;
509 }
510
511 int
512 #if __FreeBSD_version < 1102000
513 qlnxr_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
514 #else
515 qlnxr_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
516         struct ib_udata *udata)
517 #endif /* #if __FreeBSD_version < 1102000 */
518
519 {
520         struct qlnxr_dev                *dev;
521         struct ecore_rdma_device        *qattr;
522         qlnx_host_t                     *ha;
523
524         dev = get_qlnxr_dev(ibdev);
525         ha = dev->ha;
526
527         QL_DPRINT12(ha, "enter\n");
528
529 #if __FreeBSD_version > 1102000
530         if (udata->inlen || udata->outlen)
531                 return -EINVAL;
532 #endif /* #if __FreeBSD_version > 1102000 */
533
534         if (dev->rdma_ctx == NULL) {
535                 return -EINVAL;
536         }
537
538         qattr = ecore_rdma_query_device(dev->rdma_ctx);
539
540         memset(attr, 0, sizeof *attr);
541
542         attr->fw_ver = qattr->fw_ver;
543         attr->sys_image_guid = qattr->sys_image_guid;
544         attr->max_mr_size = qattr->max_mr_size;
545         attr->page_size_cap = qattr->page_size_caps;
546         attr->vendor_id = qattr->vendor_id;
547         attr->vendor_part_id = qattr->vendor_part_id;
548         attr->hw_ver = qattr->hw_ver;
549         attr->max_qp = qattr->max_qp;
550         attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
551                                         IB_DEVICE_RC_RNR_NAK_GEN |
552                                         IB_DEVICE_LOCAL_DMA_LKEY |
553                                         IB_DEVICE_MEM_MGT_EXTENSIONS;
554
555         attr->max_sge = qattr->max_sge;
556         attr->max_sge_rd = qattr->max_sge;
557         attr->max_cq = qattr->max_cq;
558         attr->max_cqe = qattr->max_cqe;
559         attr->max_mr = qattr->max_mr;
560         attr->max_mw = qattr->max_mw;
561         attr->max_pd = qattr->max_pd;
562         attr->atomic_cap = dev->atomic_cap;
563         attr->max_fmr = qattr->max_fmr;
564         attr->max_map_per_fmr = 16; /* TBD: FMR */
565
566         /* There is an implicit assumption in some of the ib_xxx apps that
567          * qp_rd_atom is smaller than qp_init_rd_atom. Specifically, during
568          * connection setup qp_rd_atom is passed to the other side and used
569          * as init_rd_atom without checking the device capabilities for
570          * init_rd_atom, so we set qp_rd_atom to the minimum of the two.
571          * There is an additional assumption in the mlx4 driver that the
572          * values are powers of two; fls is performed on the value - 1, which
573          * in fact gives a larger power of two for values that are not a
574          * power of two. That should be fixed in the mlx4 driver, but until
575          * then we provide a value that is a power of two in our code.
576          */
577         attr->max_qp_init_rd_atom =
578                 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
579         attr->max_qp_rd_atom =
580                 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
581                     attr->max_qp_init_rd_atom);
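        /*
         * Worked example of the rounding above: if the device reports
         * max_qp_req_rd_atomic_resc = 24, then fls(24) = 5 and
         * 1 << (5 - 1) = 16, i.e. the value is rounded down to the nearest
         * power of two before being reported to applications.
         */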
582
583         attr->max_srq = qattr->max_srq;
584         attr->max_srq_sge = qattr->max_srq_sge;
585         attr->max_srq_wr = qattr->max_srq_wr;
586
587         /* TODO: R&D to more properly configure the following */
588         attr->local_ca_ack_delay = qattr->dev_ack_delay;
589         attr->max_fast_reg_page_list_len = qattr->max_mr/8;
590         attr->max_pkeys = QLNXR_ROCE_PKEY_MAX;
591         attr->max_ah = qattr->max_ah;
592
593         QL_DPRINT12(ha, "exit\n");
594         return 0;
595 }
596
597 static inline void
598 get_link_speed_and_width(int speed, uint8_t *ib_speed, uint8_t *ib_width)
599 {
600         switch (speed) {
601         case 1000:
602                 *ib_speed = IB_SPEED_SDR;
603                 *ib_width = IB_WIDTH_1X;
604                 break;
605         case 10000:
606                 *ib_speed = IB_SPEED_QDR;
607                 *ib_width = IB_WIDTH_1X;
608                 break;
609
610         case 20000:
611                 *ib_speed = IB_SPEED_DDR;
612                 *ib_width = IB_WIDTH_4X;
613                 break;
614
615         case 25000:
616                 *ib_speed = IB_SPEED_EDR;
617                 *ib_width = IB_WIDTH_1X;
618                 break;
619
620         case 40000:
621                 *ib_speed = IB_SPEED_QDR;
622                 *ib_width = IB_WIDTH_4X;
623                 break;
624
625         case 50000:
626                 *ib_speed = IB_SPEED_QDR;
627                 *ib_width = IB_WIDTH_4X; // TODO doesn't add up to 50...
628                 break;
629
630         case 100000:
631                 *ib_speed = IB_SPEED_EDR;
632                 *ib_width = IB_WIDTH_4X;
633                 break;
634
635         default:
636                 /* Unsupported */
637                 *ib_speed = IB_SPEED_SDR;
638                 *ib_width = IB_WIDTH_1X;
639         }
640         return;
641 }
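/*
 * Reference for qlnxr_query_port() below: the mapping above follows the
 * usual IB convention of link speed = per-lane speed x width, e.g.
 * QDR (~10 Gb/s per lane) x 4X lanes ~= 40G and EDR (~25 Gb/s per lane)
 * x 4X ~= 100G; 50G has no exact match, hence the TODO above.
 */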
642
643 int
644 qlnxr_query_port(struct ib_device *ibdev, uint8_t port,
645         struct ib_port_attr *attr)
646 {
647         struct qlnxr_dev        *dev;
648         struct ecore_rdma_port  *rdma_port;
649         qlnx_host_t             *ha;
650
651         dev = get_qlnxr_dev(ibdev);
652         ha = dev->ha;
653
654         QL_DPRINT12(ha, "enter\n");
655
656         if (port > 1) {
657                 QL_DPRINT12(ha, "port [%d] > 1 \n", port);
658                 return -EINVAL;
659         }
660
661         if (dev->rdma_ctx == NULL) {
662                 QL_DPRINT12(ha, "rdma_ctx == NULL\n");
663                 return -EINVAL;
664         }
665
666         rdma_port = ecore_rdma_query_port(dev->rdma_ctx);
667         memset(attr, 0, sizeof *attr);
668
669         if (rdma_port->port_state == ECORE_RDMA_PORT_UP) {
670                 attr->state = IB_PORT_ACTIVE;
671                 attr->phys_state = 5;
672         } else {
673                 attr->state = IB_PORT_DOWN;
674                 attr->phys_state = 3;
675         }
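        /*
         * phys_state values follow the IB PortPhysicalState encoding:
         * 5 = LinkUp, 3 = Disabled.
         */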
676
677         attr->max_mtu = IB_MTU_4096;
678         attr->active_mtu = iboe_get_mtu(dev->ha->ifp->if_mtu);
679         attr->lid = 0;
680         attr->lmc = 0;
681         attr->sm_lid = 0;
682         attr->sm_sl = 0;
683         attr->port_cap_flags = 0;
684
685         if (QLNX_IS_IWARP(dev)) {
686                 attr->gid_tbl_len = 1;
687                 attr->pkey_tbl_len = 1;
688         } else {
689                 attr->gid_tbl_len = QLNXR_MAX_SGID;
690                 attr->pkey_tbl_len = QLNXR_ROCE_PKEY_TABLE_LEN;
691         }
692
693         attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
694         attr->qkey_viol_cntr = 0;
695
696         get_link_speed_and_width(rdma_port->link_speed,
697                                  &attr->active_speed, &attr->active_width);
698
699         attr->max_msg_sz = rdma_port->max_msg_size;
700         attr->max_vl_num = 4; /* TODO -> figure this one out... */
701
702         QL_DPRINT12(ha, "state = %d phys_state = %d "
703                 " link_speed = %d active_speed = %d active_width = %d"
704                 " attr->gid_tbl_len = %d attr->pkey_tbl_len = %d"
705                 " max_msg_sz = 0x%x max_vl_num = 0x%x \n",
706                 attr->state, attr->phys_state,
707                 rdma_port->link_speed, attr->active_speed,
708                 attr->active_width, attr->gid_tbl_len, attr->pkey_tbl_len,
709                 attr->max_msg_sz, attr->max_vl_num);
710
711         QL_DPRINT12(ha, "exit\n");
712         return 0;
713 }
714
715 int
716 qlnxr_modify_port(struct ib_device *ibdev, uint8_t port, int mask,
717         struct ib_port_modify *props)
718 {
719         struct qlnxr_dev        *dev;
720         qlnx_host_t             *ha;
721
722         dev = get_qlnxr_dev(ibdev);
723         ha = dev->ha;
724
725         QL_DPRINT12(ha, "enter\n");
726
727         if (port > 1) {
728                 QL_DPRINT12(ha, "port (%d) > 1\n", port);
729                 return -EINVAL;
730         }
731
732         QL_DPRINT12(ha, "exit\n");
733         return 0;
734 }
735
736 enum rdma_link_layer
737 qlnxr_link_layer(struct ib_device *ibdev, uint8_t port_num)
738 {
739         struct qlnxr_dev        *dev;
740         qlnx_host_t             *ha;
741
742         dev = get_qlnxr_dev(ibdev);
743         ha = dev->ha;
744
745         QL_DPRINT12(ha, "ibdev = %p port_num = 0x%x\n", ibdev, port_num);
746
747         return IB_LINK_LAYER_ETHERNET;
748 }
749
750 struct ib_pd *
751 qlnxr_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context,
752         struct ib_udata *udata)
753 {
754         struct qlnxr_pd         *pd = NULL;
755         u16                     pd_id;
756         int                     rc;
757         struct qlnxr_dev        *dev;
758         qlnx_host_t             *ha;
759
760         dev = get_qlnxr_dev(ibdev);
761         ha = dev->ha;
762
763         QL_DPRINT12(ha, "ibdev = %p context = %p"
764                 " udata = %p enter\n", ibdev, context, udata);
765
766         if (dev->rdma_ctx == NULL) {
767                 QL_DPRINT11(ha, "dev->rdma_ctx = NULL\n");
768                 rc = -1;
769                 goto err;
770         }
771
772         pd = kzalloc(sizeof(*pd), GFP_KERNEL);
773         if (!pd) {
774                 rc = -ENOMEM;
775                 QL_DPRINT11(ha, "kzalloc(pd) = NULL\n");
776                 goto err;
777         }
778
779         rc = ecore_rdma_alloc_pd(dev->rdma_ctx, &pd_id);
780         if (rc) {
781                 QL_DPRINT11(ha, "ecore_rdma_alloc_pd failed\n");
782                 goto err;
783         }
784
785         pd->pd_id = pd_id;
786
787         if (udata && context) {
788
789                 rc = ib_copy_to_udata(udata, &pd->pd_id, sizeof(pd->pd_id));
790                 if (rc) {
791                         QL_DPRINT11(ha, "ib_copy_to_udata failed\n");
792                         ecore_rdma_free_pd(dev->rdma_ctx, pd_id);
793                         goto err;
794                 }
795
796                 pd->uctx = get_qlnxr_ucontext(context);
797                 pd->uctx->pd = pd;
798         }
799
800         atomic_add_rel_32(&dev->pd_count, 1);
801         QL_DPRINT12(ha, "exit [pd, pd_id, pd_count] = [%p, 0x%x, %d]\n",
802                 pd, pd_id, dev->pd_count);
803
804         return &pd->ibpd;
805
806 err:
807         kfree(pd);
808         QL_DPRINT12(ha, "exit -1\n");
809         return ERR_PTR(rc);
810 }
811
812 int
813 qlnxr_dealloc_pd(struct ib_pd *ibpd)
814 {
815         struct qlnxr_pd         *pd;
816         struct qlnxr_dev        *dev;
817         qlnx_host_t             *ha;
818
819         pd = get_qlnxr_pd(ibpd);
820         dev = get_qlnxr_dev((ibpd->device));
821         ha = dev->ha;
822
823         QL_DPRINT12(ha, "enter\n");
824
825         if (pd == NULL) {
826                 QL_DPRINT11(ha, "pd = NULL\n");
827         } else {
828                 ecore_rdma_free_pd(dev->rdma_ctx, pd->pd_id);
829                 kfree(pd);
830                 atomic_subtract_rel_32(&dev->pd_count, 1);
831                 QL_DPRINT12(ha, "exit [pd, pd_id, pd_count] = [%p, 0x%x, %d]\n",
832                         pd, pd->pd_id, dev->pd_count);
833         }
834
835         QL_DPRINT12(ha, "exit\n");
836         return 0;
837 }
838
839 #define ROCE_WQE_ELEM_SIZE      sizeof(struct rdma_sq_sge)
840 #define RDMA_MAX_SGE_PER_SRQ    (4) /* Should be part of HSI */
841 /* Should be part of HSI */
842 #define RDMA_MAX_SRQ_WQE_SIZE   (RDMA_MAX_SGE_PER_SRQ + 1) /* +1 for header */
843 #define DB_ADDR_SHIFT(addr)             ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
844
845 static void qlnxr_cleanup_user(struct qlnxr_dev *, struct qlnxr_qp *);
846 static void qlnxr_cleanup_kernel(struct qlnxr_dev *, struct qlnxr_qp *);
847
848 int
849 qlnxr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
850 {
851         struct qlnxr_dev        *dev;
852         qlnx_host_t             *ha;
853
854         dev = get_qlnxr_dev(ibdev);
855         ha = dev->ha;
856
857         QL_DPRINT12(ha, "enter index = 0x%x\n", index);
858
859         if (index > QLNXR_ROCE_PKEY_TABLE_LEN) 
860                 return -EINVAL;
861
862         *pkey = QLNXR_ROCE_PKEY_DEFAULT;
863
864         QL_DPRINT12(ha, "exit\n");
865         return 0;
866 }
867
868
869 static inline bool
870 qlnxr_get_vlan_id_qp(qlnx_host_t *ha, struct ib_qp_attr *attr, int attr_mask,
871        u16 *vlan_id)
872 {
873         bool ret = false;
874
875         QL_DPRINT12(ha, "enter \n");
876
877         *vlan_id = 0;
878
879 #if __FreeBSD_version >= 1100000
880         u16 tmp_vlan_id;
881
882 #if __FreeBSD_version >= 1102000
883         union ib_gid *dgid;
884
885         dgid = &attr->ah_attr.grh.dgid;
886         tmp_vlan_id = (dgid->raw[11] << 8) | dgid->raw[12];
887
888         if (!(tmp_vlan_id & ~EVL_VLID_MASK)) {
889                 *vlan_id = tmp_vlan_id;
890                 ret = true;
891         }
892 #else
893         tmp_vlan_id = attr->vlan_id;
894
895         if ((attr_mask & IB_QP_VID) && (!(tmp_vlan_id & ~EVL_VLID_MASK))) {
896                 *vlan_id = tmp_vlan_id;
897                 ret = true;
898         }
899
900 #endif /* #if __FreeBSD_version > 1102000 */
901
902 #else
903         ret = true;
904
905 #endif /* #if __FreeBSD_version >= 1100000 */
906
907         QL_DPRINT12(ha, "exit vlan_id = 0x%x ret = %d \n", *vlan_id, ret);
908
909         return (ret);
910 }
911
912 static inline void
913 get_gid_info(struct ib_qp *ibqp, struct ib_qp_attr *attr,
914         int attr_mask,
915         struct qlnxr_dev *dev,
916         struct qlnxr_qp *qp,
917         struct ecore_rdma_modify_qp_in_params *qp_params)
918 {
919         int             i;
920         qlnx_host_t     *ha;
921
922         ha = dev->ha;
923
924         QL_DPRINT12(ha, "enter\n");
925
926         memcpy(&qp_params->sgid.bytes[0],
927                &dev->sgid_tbl[qp->sgid_idx].raw[0],
928                sizeof(qp_params->sgid.bytes));
929         memcpy(&qp_params->dgid.bytes[0],
930                &attr->ah_attr.grh.dgid.raw[0],
931                sizeof(qp_params->dgid));
932
933         qlnxr_get_vlan_id_qp(ha, attr, attr_mask, &qp_params->vlan_id);
934
935         for (i = 0; i < (sizeof(qp_params->sgid.dwords)/sizeof(uint32_t)); i++) {
936                 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
937                 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
938         }
939
940         QL_DPRINT12(ha, "exit\n");
941         return;
942 }
943
944
945
946 static int
947 qlnxr_add_mmap(struct qlnxr_ucontext *uctx, u64 phy_addr, unsigned long len)
948 {
949         struct qlnxr_mm *mm;
950         qlnx_host_t     *ha;
951
952         ha = uctx->dev->ha;
953
954         QL_DPRINT12(ha, "enter\n");
955
956         mm = kzalloc(sizeof(*mm), GFP_KERNEL);
957         if (mm == NULL) {
958                 QL_DPRINT11(ha, "mm = NULL\n");
959                 return -ENOMEM;
960         }
961
962         mm->key.phy_addr = phy_addr;
963
964         /* This function might be called with a length which is not a multiple
965          * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
966          * forces this granularity by increasing the requested size if needed.
967          * When qedr_mmap is called, it will search the list with the updated
968          * length as a key. To prevent search failures, the length is rounded up
969          * in advance to PAGE_SIZE.
970          */
971         mm->key.len = roundup(len, PAGE_SIZE);
972         INIT_LIST_HEAD(&mm->entry);
973
974         mutex_lock(&uctx->mm_list_lock);
975         list_add(&mm->entry, &uctx->mm_head);
976         mutex_unlock(&uctx->mm_list_lock);
977
978         QL_DPRINT12(ha, "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
979                 (unsigned long long)mm->key.phy_addr,
980                 (unsigned long)mm->key.len, uctx);
981
982         return 0;
983 }
984
985 static bool
986 qlnxr_search_mmap(struct qlnxr_ucontext *uctx, u64 phy_addr, unsigned long len)
987 {
988         bool            found = false;
989         struct qlnxr_mm *mm;
990         qlnx_host_t     *ha;
991
992         ha = uctx->dev->ha;
993
994         QL_DPRINT12(ha, "enter\n");
995
996         mutex_lock(&uctx->mm_list_lock);
997         list_for_each_entry(mm, &uctx->mm_head, entry) {
998                 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
999                         continue;
1000
1001                 found = true;
1002                 break;
1003         }
1004         mutex_unlock(&uctx->mm_list_lock);
1005
1006         QL_DPRINT12(ha,
1007                 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, found=%d\n",
1008                 (unsigned long long)phy_addr, (unsigned long)len, uctx, found);
1009
1010         return found;
1011 }
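/*
 * The mm_head list is the bookkeeping behind mmap(): qlnxr_alloc_ucontext()
 * registers the doorbell (DPI) region with qlnxr_add_mmap(), and qlnxr_mmap()
 * below only maps offsets that qlnxr_search_mmap() finds in this list.
 */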
1012
1013 struct
1014 ib_ucontext *qlnxr_alloc_ucontext(struct ib_device *ibdev,
1015                 struct ib_udata *udata)
1016 {
1017         int rc;
1018         struct qlnxr_ucontext *ctx;
1019         struct qlnxr_alloc_ucontext_resp uresp;
1020         struct qlnxr_dev *dev = get_qlnxr_dev(ibdev);
1021         qlnx_host_t *ha = dev->ha;
1022         struct ecore_rdma_add_user_out_params oparams;
1023
1024         if (!udata) {
1025                 return ERR_PTR(-EFAULT);
1026         }
1027
1028         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1029         if (!ctx)
1030                 return ERR_PTR(-ENOMEM);
1031
1032         rc = ecore_rdma_add_user(dev->rdma_ctx, &oparams);
1033         if (rc) {
1034                 QL_DPRINT12(ha,
1035                         "Failed to allocate a DPI for a new RoCE application, "
1036                         "rc = %d. To overcome this, consider increasing "
1037                         "the number of DPIs, increasing the doorbell BAR size, "
1038                         "or closing unnecessary RoCE applications. To "
1039                         "increase the number of DPIs, consult the "
1040                         "README\n", rc);
1041                 goto err;
1042         }
1043
1044         ctx->dpi = oparams.dpi;
1045         ctx->dpi_addr = oparams.dpi_addr;
1046         ctx->dpi_phys_addr = oparams.dpi_phys_addr;
1047         ctx->dpi_size = oparams.dpi_size;
1048         INIT_LIST_HEAD(&ctx->mm_head);
1049         mutex_init(&ctx->mm_list_lock);
1050
1051         memset(&uresp, 0, sizeof(uresp));
1052         uresp.dpm_enabled = offsetof(struct qlnxr_alloc_ucontext_resp, dpm_enabled)
1053                                 < udata->outlen ? dev->user_dpm_enabled : 0; //TODO: figure this out
1054         uresp.wids_enabled = offsetof(struct qlnxr_alloc_ucontext_resp, wids_enabled)
1055                                 < udata->outlen ? 1 : 0; //TODO: figure this out
1056         uresp.wid_count = offsetof(struct qlnxr_alloc_ucontext_resp, wid_count)
1057                                 < udata->outlen ? oparams.wid_count : 0; //TODO: figure this out 
1058         uresp.db_pa = ctx->dpi_phys_addr;
1059         uresp.db_size = ctx->dpi_size;
1060         uresp.max_send_wr = dev->attr.max_sqe;
1061         uresp.max_recv_wr = dev->attr.max_rqe;
1062         uresp.max_srq_wr = dev->attr.max_srq_wr;
1063         uresp.sges_per_send_wr = QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
1064         uresp.sges_per_recv_wr = QLNXR_MAX_RQE_ELEMENTS_PER_RQE;
1065         uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
1066         uresp.max_cqes = QLNXR_MAX_CQES;
1067         
1068         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1069         if (rc)
1070                 goto err;
1071
1072         ctx->dev = dev;
1073
1074         rc = qlnxr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
1075         if (rc)
1076                 goto err;
1077         QL_DPRINT12(ha, "Allocated user context %p\n",
1078                 &ctx->ibucontext);
1079         
1080         return &ctx->ibucontext;
1081 err:
1082         kfree(ctx);
1083         return ERR_PTR(rc);
1084 }
1085
1086 int
1087 qlnxr_dealloc_ucontext(struct ib_ucontext *ibctx)
1088 {
1089         struct qlnxr_ucontext *uctx = get_qlnxr_ucontext(ibctx);
1090         struct qlnxr_dev *dev = uctx->dev;
1091         qlnx_host_t *ha = dev->ha;
1092         struct qlnxr_mm *mm, *tmp;
1093         int status = 0;
1094
1095         QL_DPRINT12(ha, "Deallocating user context %p\n",
1096                         uctx);
1097
1098         if (dev) {
1099                 ecore_rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
1100         }
1101
1102         list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
1103                 QL_DPRINT12(ha, "deleted addr= 0x%llx, len = 0x%lx for"
1104                                 " ctx=%p\n",
1105                                 mm->key.phy_addr, mm->key.len, uctx);
1106                 list_del(&mm->entry);
1107                 kfree(mm);
1108         }
1109         kfree(uctx);
1110         return status;
1111 }
1112
1113 int
1114 qlnxr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1115 {
1116         struct qlnxr_ucontext   *ucontext = get_qlnxr_ucontext(context);
1117         struct qlnxr_dev        *dev = get_qlnxr_dev((context->device));
1118         unsigned long           vm_page = vma->vm_pgoff << PAGE_SHIFT;
1119         u64                     unmapped_db;
1120         unsigned long           len = (vma->vm_end - vma->vm_start);
1121         int                     rc = 0;
1122         bool                    found;
1123         qlnx_host_t             *ha;
1124
1125         ha = dev->ha;
1126
1127 #if __FreeBSD_version > 1102000
1128         unmapped_db = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
1129 #else
1130         unmapped_db = dev->db_phys_addr;
1131 #endif /* #if __FreeBSD_version > 1102000 */
1132
1133         QL_DPRINT12(ha, "qedr_mmap enter vm_page=0x%lx"
1134                 " vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
1135                 vm_page, vma->vm_pgoff, unmapped_db,
1136                 dev->db_size, len);
1137
1138         if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
1139                 QL_DPRINT11(ha, "vm_start or length not page aligned: "
1140                         "vm_start = %ld vm_end = %ld\n", vma->vm_start,
1141                         vma->vm_end);
1142                 return -EINVAL;
1143         }
1144
1145         found = qlnxr_search_mmap(ucontext, vm_page, len);
1146         if (!found) {
1147                 QL_DPRINT11(ha, "Vma_pgoff not found in mapped array = %ld\n",
1148                         vma->vm_pgoff);
1149                 return -EINVAL;
1150         }
1151
1152         QL_DPRINT12(ha, "Mapping doorbell bar\n");
1153
1154 #if __FreeBSD_version > 1102000
1155
1156         if ((vm_page < unmapped_db) ||
1157                 ((vm_page + len) > (unmapped_db + ucontext->dpi_size))) {
1158                 QL_DPRINT11(ha, "failed pages are outside of dpi;"
1159                         "page address=0x%lx, unmapped_db=0x%lx, dpi_size=0x%x\n",
1160                         vm_page, unmapped_db, ucontext->dpi_size);
1161                 return -EINVAL;
1162         }
1163
1164         if (vma->vm_flags & VM_READ) {
1165                 QL_DPRINT11(ha, "failed mmap, cannot map doorbell bar for read\n");
1166                 return -EINVAL;
1167         }
1168
1169         vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1170         rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
1171                         vma->vm_page_prot);
1172
1173 #else
1174
1175         if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
1176                 dev->db_size))) {
1177
1178                 QL_DPRINT12(ha, "Mapping doorbell bar\n");
1179
1180                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1181
1182                 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1183                                             PAGE_SIZE, vma->vm_page_prot);
1184         } else {
1185                 QL_DPRINT12(ha, "Mapping chains\n");
1186                 rc = io_remap_pfn_range(vma, vma->vm_start,
1187                                          vma->vm_pgoff, len, vma->vm_page_prot);
1188         }
1189
1190 #endif /* #if __FreeBSD_version > 1102000 */
1191
1192         QL_DPRINT12(ha, "exit [%d]\n", rc);
1193         return rc;
1194 }
1195
1196 struct ib_mr *
1197 qlnxr_get_dma_mr(struct ib_pd *ibpd, int acc)
1198 {
1199         struct qlnxr_mr         *mr;
1200         struct qlnxr_dev        *dev = get_qlnxr_dev((ibpd->device));
1201         struct qlnxr_pd         *pd = get_qlnxr_pd(ibpd);
1202         int                     rc;
1203         qlnx_host_t             *ha;
1204
1205         ha = dev->ha;
1206
1207         QL_DPRINT12(ha, "enter\n");
1208
1209         if (acc & IB_ACCESS_MW_BIND) {
1210                 QL_DPRINT12(ha, "Unsupported access flags received for dma mr\n");
1211         }
1212
1213         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1214         if (!mr) {
1215                 rc = -ENOMEM;
1216                 QL_DPRINT12(ha, "kzalloc(mr) failed %d\n", rc);
1217                 goto err0;
1218         }
1219
1220         mr->type = QLNXR_MR_DMA;
1221
1222         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
1223         if (rc) {
1224                 QL_DPRINT12(ha, "ecore_rdma_alloc_tid failed %d\n", rc);
1225                 goto err1;
1226         }
1227
1228         /* index only, 18 bit long, lkey = itid << 8 | key */
1229         mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
1230         mr->hw_mr.pd = pd->pd_id;
1231         mr->hw_mr.local_read = 1;
1232         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
1233         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
1234         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1235         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
1236         mr->hw_mr.dma_mr = true;
1237
1238         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
1239         if (rc) {
1240                 QL_DPRINT12(ha, "ecore_rdma_register_tid failed %d\n", rc);
1241                 goto err2;
1242         }
1243
1244         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1245
1246         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
1247                 mr->hw_mr.remote_atomic) {
1248                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1249         }
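        /*
         * Illustrative key layout (per the "itid << 8 | key" comment above):
         * itid = 0x12345 and key = 0xAB yield lkey/rkey = 0x12345AB.
         */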
1250
1251         QL_DPRINT12(ha, "lkey = %x\n", mr->ibmr.lkey);
1252
1253         return &mr->ibmr;
1254
1255 err2:
1256         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
1257 err1:
1258         kfree(mr);
1259 err0:
1260         QL_DPRINT12(ha, "exit [%d]\n", rc);
1261
1262         return ERR_PTR(rc);
1263 }
1264
1265 static void
1266 qlnxr_free_pbl(struct qlnxr_dev *dev, struct qlnxr_pbl_info *pbl_info,
1267         struct qlnxr_pbl *pbl)
1268 {
1269         int             i;
1270         qlnx_host_t     *ha;
1271
1272         ha = dev->ha;
1273
1274         QL_DPRINT12(ha, "enter\n");
1275
1276         for (i = 0; i < pbl_info->num_pbls; i++) {
1277                 if (!pbl[i].va)
1278                         continue;
1279                 qlnx_dma_free_coherent(&dev->ha->cdev, pbl[i].va, pbl[i].pa,
1280                         pbl_info->pbl_size);
1281         }
1282         kfree(pbl);
1283
1284         QL_DPRINT12(ha, "exit\n");
1285         return;
1286 }
1287
1288 #define MIN_FW_PBL_PAGE_SIZE (4*1024)
1289 #define MAX_FW_PBL_PAGE_SIZE (64*1024)
1290
1291 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
1292 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
1293 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE*MAX_PBES_ON_PAGE)
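/*
 * Rough capacity numbers implied by the limits above (8-byte PBEs):
 * a 4 KB PBL page holds 512 PBEs and a 64 KB page holds 8192, so a
 * two-layer table tops out at 8192 * 8192 = ~67 million PBEs
 * (~256 GB of memory described with 4 KB pages).
 */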
1294
1295 static struct qlnxr_pbl *
1296 qlnxr_alloc_pbl_tbl(struct qlnxr_dev *dev,
1297         struct qlnxr_pbl_info *pbl_info, gfp_t flags)
1298 {
1299         void                    *va;
1300         dma_addr_t              pa;
1301         dma_addr_t              *pbl_main_tbl;
1302         struct qlnxr_pbl        *pbl_table;
1303         int                     i, rc = 0;
1304         qlnx_host_t             *ha;
1305
1306         ha = dev->ha;
1307
1308         QL_DPRINT12(ha, "enter\n");
1309
1310         pbl_table = kzalloc(sizeof(*pbl_table) * pbl_info->num_pbls, flags);
1311
1312         if (!pbl_table) {
1313                 QL_DPRINT12(ha, "pbl_table = NULL\n");
1314                 return NULL;
1315         }
1316
1317         for (i = 0; i < pbl_info->num_pbls; i++) {
1318                 va = qlnx_dma_alloc_coherent(&dev->ha->cdev, &pa, pbl_info->pbl_size);
1319                 if (!va) {
1320                         QL_DPRINT11(ha, "Failed to allocate pbl#%d\n", i);
1321                         rc = -ENOMEM;
1322                         goto err;
1323                 }
1324                 memset(va, 0, pbl_info->pbl_size);
1325                 pbl_table[i].va = va;
1326                 pbl_table[i].pa = pa;
1327         }
1328
1329         /* Two-layer PBLs: if we have more than one PBL, initialize the
1330          * first one with physical pointers to all of the rest.
1331          */
1332         pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
1333         for (i = 0; i < pbl_info->num_pbls - 1; i++)
1334                 pbl_main_tbl[i] = pbl_table[i + 1].pa;
1335
1336         QL_DPRINT12(ha, "exit\n");
1337         return pbl_table;
1338
1339 err:
1340         qlnxr_free_pbl(dev, pbl_info, pbl_table);
1341
1342         QL_DPRINT12(ha, "exit with error\n");
1343         return NULL;
1344 }
1345
1346 static int
1347 qlnxr_prepare_pbl_tbl(struct qlnxr_dev *dev,
1348         struct qlnxr_pbl_info *pbl_info,
1349         u32 num_pbes,
1350         int two_layer_capable)
1351 {
1352         u32             pbl_capacity;
1353         u32             pbl_size;
1354         u32             num_pbls;
1355         qlnx_host_t     *ha;
1356
1357         ha = dev->ha;
1358
1359         QL_DPRINT12(ha, "enter\n");
1360
1361         if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
1362                 if (num_pbes > MAX_PBES_TWO_LAYER) {
1363                         QL_DPRINT11(ha, "prepare pbl table: too many pages %d\n",
1364                                 num_pbes);
1365                         return -EINVAL;
1366                 }
1367
1368                 /* calculate required pbl page size */
1369                 pbl_size = MIN_FW_PBL_PAGE_SIZE;
1370                 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
1371                         NUM_PBES_ON_PAGE(pbl_size);
1372
1373                 while (pbl_capacity < num_pbes) {
1374                         pbl_size *= 2;
1375                         pbl_capacity = pbl_size / sizeof(u64);
1376                         pbl_capacity = pbl_capacity * pbl_capacity;
1377                 }
1378
1379                 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
1380                 num_pbls++; /* One for the layer0 ( points to the pbls) */
1381                 pbl_info->two_layered = true;
1382         } else {
1383                 /* One layered PBL */
1384                 num_pbls = 1;
1385                 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE, \
1386                                 roundup_pow_of_two((num_pbes * sizeof(u64))));
1387                 pbl_info->two_layered = false;
1388         }
1389
1390         pbl_info->num_pbls = num_pbls;
1391         pbl_info->pbl_size = pbl_size;
1392         pbl_info->num_pbes = num_pbes;
1393
1394         QL_DPRINT12(ha, "prepare pbl table: num_pbes=%d, num_pbls=%d pbl_size=%d\n",
1395                 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
1396
1397         return 0;
1398 }
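/*
 * Sizing example for the function above (illustrative numbers): for
 * num_pbes = 1,000,000 with two_layer_capable set, the loop settles on
 * pbl_size = 8 KB (1024 PBEs per page, capacity 1024 * 1024 >= 1,000,000),
 * giving num_pbls = DIV_ROUND_UP(1000000, 1024) + 1 = 978 (977 data PBLs
 * plus one layer-0 PBL).
 */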
1399
1400 #define upper_32_bits(x) (uint32_t)((x) >> 32)
1401 #define lower_32_bits(x) (uint32_t)(x)
1402
1403 static void
1404 qlnxr_populate_pbls(struct qlnxr_dev *dev, struct ib_umem *umem,
1405         struct qlnxr_pbl *pbl, struct qlnxr_pbl_info *pbl_info)
1406 {
1407         struct regpair          *pbe;
1408         struct qlnxr_pbl        *pbl_tbl;
1409         struct scatterlist      *sg;
1410         int                     shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
1411         qlnx_host_t             *ha;
1412
1413 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
1414         int                     i;
1415         struct                  ib_umem_chunk *chunk = NULL;
1416 #else
1417         int                     entry;
1418 #endif
1419
1420
1421         ha = dev->ha;
1422
1423         QL_DPRINT12(ha, "enter\n");
1424
1425         if (!pbl_info) {
1426                 QL_DPRINT11(ha, "PBL_INFO not initialized\n");
1427                 return;
1428         }
1429
1430         if (!pbl_info->num_pbes) {
1431                 QL_DPRINT11(ha, "pbl_info->num_pbes == 0\n");
1432                 return;
1433         }
1434
1435         /* If we have a two-layered pbl, the first pbl points to the rest
1436          * of the pbls and the first entry lies in the second pbl of the table.
1437          */
1438         if (pbl_info->two_layered)
1439                 pbl_tbl = &pbl[1];
1440         else
1441                 pbl_tbl = pbl;
1442
1443         pbe = (struct regpair *)pbl_tbl->va;
1444         if (!pbe) {
1445                 QL_DPRINT12(ha, "pbe is NULL\n");
1446                 return;
1447         }
1448
1449         pbe_cnt = 0;
1450
1451         shift = ilog2(umem->page_size);
1452
1453 #ifndef DEFINE_IB_UMEM_WITH_CHUNK
1454
1455         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
1456
1457 #else
1458         list_for_each_entry(chunk, &umem->chunk_list, list) {
1459                 /* get all the dma regions from the chunk. */
1460                 for (i = 0; i < chunk->nmap; i++) {
1461                         sg = &chunk->page_list[i];
1462 #endif
1463                         pages = sg_dma_len(sg) >> shift;
1464                         for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
1465                                 /* store the page address in pbe */
1466                                 pbe->lo =
1467                                     cpu_to_le32(sg_dma_address(sg) +
1468                                                 (umem->page_size * pg_cnt));
1469                                 pbe->hi =
1470                                     cpu_to_le32(upper_32_bits
1471                                                 ((sg_dma_address(sg) +
1472                                                   umem->page_size * pg_cnt)));
1473
1474                                 QL_DPRINT12(ha,
1475                                         "Populate pbl table:"
1476                                         " pbe->addr=0x%x:0x%x "
1477                                         " pbe_cnt = %d total_num_pbes=%d"
1478                                         " pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
1479                                         total_num_pbes, pbe);
1480
1481                                 pbe_cnt ++;
1482                                 total_num_pbes ++;
1483                                 pbe++;
1484
1485                                 if (total_num_pbes == pbl_info->num_pbes)
1486                                         return;
1487
1488                                 /* if the given pbl is full storing the pbes,
1489                                  * move to next pbl.
1490                                  */
1491                                 if (pbe_cnt ==
1492                                         (pbl_info->pbl_size / sizeof(u64))) {
1493                                         pbl_tbl++;
1494                                         pbe = (struct regpair *)pbl_tbl->va;
1495                                         pbe_cnt = 0;
1496                                 }
1497                         }
1498 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
1499                 }
1500 #endif
1501         }
1502         QL_DPRINT12(ha, "exit\n");
1503         return;
1504 }
1505
1506 static void
1507 free_mr_info(struct qlnxr_dev *dev, struct mr_info *info)
1508 {
1509         struct qlnxr_pbl *pbl, *tmp;
1510         qlnx_host_t             *ha;
1511
1512         ha = dev->ha;
1513
1514         QL_DPRINT12(ha, "enter\n");
1515
1516         if (info->pbl_table)
1517                 list_add_tail(&info->pbl_table->list_entry,
1518                               &info->free_pbl_list);
1519
1520         if (!list_empty(&info->inuse_pbl_list))
1521                 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
1522
1523         list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
1524                 list_del(&pbl->list_entry);
1525                 qlnxr_free_pbl(dev, &info->pbl_info, pbl);
1526         }
1527         QL_DPRINT12(ha, "exit\n");
1528
1529         return;
1530 }
1531
1532 static int
1533 qlnxr_init_mr_info(struct qlnxr_dev *dev, struct mr_info *info,
1534         size_t page_list_len, bool two_layered)
1535 {
1536         int                     rc;
1537         struct qlnxr_pbl        *tmp;
1538         qlnx_host_t             *ha;
1539
1540         ha = dev->ha;
1541
1542         QL_DPRINT12(ha, "enter\n");
1543
1544         INIT_LIST_HEAD(&info->free_pbl_list);
1545         INIT_LIST_HEAD(&info->inuse_pbl_list);
1546
1547         rc = qlnxr_prepare_pbl_tbl(dev, &info->pbl_info,
1548                                   page_list_len, two_layered);
1549         if (rc) {
1550                 QL_DPRINT11(ha, "qlnxr_prepare_pbl_tbl [%d]\n", rc);
1551                 goto done;
1552         }
1553
1554         info->pbl_table = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
1555
1556         if (!info->pbl_table) {
1557                 rc = -ENOMEM;
1558                 QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl returned NULL\n");
1559                 goto done;
1560         }
1561
1562         QL_DPRINT12(ha, "pbl_table_pa = %pa\n", &info->pbl_table->pa);
1563
1564         /* in the usual case we use 2 PBLs, so add one to the free
1565          * list and allocate another one
1566          */
1567         tmp = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
1568
1569         if (!tmp) {
1570                 QL_DPRINT11(ha, "Extra PBL is not allocated\n");
1571                 goto done; /* it's OK if second allocation fails, so rc = 0 */
1572         }
1573
1574         list_add_tail(&tmp->list_entry, &info->free_pbl_list);
1575
1576         QL_DPRINT12(ha, "extra pbl_table_pa = %pa\n", &tmp->pa);
1577
1578 done:
1579         if (rc)
1580                 free_mr_info(dev, info);
1581
1582         QL_DPRINT12(ha, "exit [%d]\n", rc);
1583
1584         return rc;
1585 }
1586
1587
1588 struct ib_mr *
1589 #if __FreeBSD_version >= 1102000
1590 qlnxr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
1591         u64 usr_addr, int acc, struct ib_udata *udata)
1592 #else
1593 qlnxr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
1594         u64 usr_addr, int acc, struct ib_udata *udata, int mr_id)
1595 #endif /* #if __FreeBSD_version >= 1102000 */
1596 {
1597         int             rc = -ENOMEM;
1598         struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
1599         struct qlnxr_mr *mr;
1600         struct qlnxr_pd *pd;
1601         qlnx_host_t     *ha;
1602
1603         ha = dev->ha;
1604
1605         QL_DPRINT12(ha, "enter\n");
1606
1607         pd = get_qlnxr_pd(ibpd);
1608
1609         QL_DPRINT12(ha, "qlnxr register user mr pd = %d"
1610                 " start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
1611                 pd->pd_id, start, len, usr_addr, acc);
1612
1613         if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
1614                 QL_DPRINT11(ha,
1615                         "(acc & IB_ACCESS_REMOTE_WRITE &&"
1616                         " !(acc & IB_ACCESS_LOCAL_WRITE))\n");
1617                 return ERR_PTR(-EINVAL);
1618         }
1619
1620         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1621         if (!mr) {
1622                 QL_DPRINT11(ha, "kzalloc(mr) failed\n");
1623                 return ERR_PTR(rc);
1624         }
1625
1626         mr->type = QLNXR_MR_USER;
1627
1628         mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
1629         if (IS_ERR(mr->umem)) {
1630                 rc = -EFAULT;
1631                 QL_DPRINT11(ha, "ib_umem_get failed [%p]\n", mr->umem);
1632                 goto err0;
1633         }
1634
1635         rc = qlnxr_init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
1636         if (rc) {
1637                 QL_DPRINT11(ha,
1638                         "qlnxr_init_mr_info failed [%d]\n", rc);
1639                 goto err1;
1640         }
1641
1642         qlnxr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
1643                            &mr->info.pbl_info);
1644
1645         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
1646
1647         if (rc) {
1648                 QL_DPRINT11(ha, "roce alloc tid returned an error %d\n", rc);
1649                 goto err1;
1650         }
1651
1652         /* index only, 18 bit long, lkey = itid << 8 | key */
1653         mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
1654         mr->hw_mr.key = 0;
1655         mr->hw_mr.pd = pd->pd_id;
1656         mr->hw_mr.local_read = 1;
1657         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
1658         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
1659         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1660         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
1661         mr->hw_mr.mw_bind = false; /* TBD MW BIND */
1662         mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
1663         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
1664         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
1665         mr->hw_mr.page_size_log = ilog2(mr->umem->page_size); /* for the MR pages */
1666
1667 #if __FreeBSD_version >= 1102000
1668         mr->hw_mr.fbo = ib_umem_offset(mr->umem);
1669 #else
1670         mr->hw_mr.fbo = mr->umem->offset;
1671 #endif
1672         mr->hw_mr.length = len;
1673         mr->hw_mr.vaddr = usr_addr;
1674         mr->hw_mr.zbva = false; /* TBD figure when this should be true */
1675         mr->hw_mr.phy_mr = false; /* Fast MR - True, Regular Register False */
1676         mr->hw_mr.dma_mr = false;
1677
1678         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
1679         if (rc) {
1680                 QL_DPRINT11(ha, "roce register tid returned an error %d\n", rc);
1681                 goto err2;
1682         }
1683
1684         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
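             /* expose an rkey only if some form of remote access was requested */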
1685         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
1686                 mr->hw_mr.remote_atomic)
1687                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1688
1689         QL_DPRINT12(ha, "register user mr lkey: %x\n", mr->ibmr.lkey);
1690
1691         return (&mr->ibmr);
1692
1693 err2:
1694         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
1695 err1:
1696         qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
1697 err0:
1698         kfree(mr);
1699
1700         QL_DPRINT12(ha, "exit [%d]\n", rc);
1701         return (ERR_PTR(rc));
1702 }
1703
1704 int
1705 qlnxr_dereg_mr(struct ib_mr *ib_mr)
1706 {
1707         struct qlnxr_mr *mr = get_qlnxr_mr(ib_mr);
1708         struct qlnxr_dev *dev = get_qlnxr_dev((ib_mr->device));
1709         int             rc = 0;
1710         qlnx_host_t     *ha;
1711
1712         ha = dev->ha;
1713
1714         QL_DPRINT12(ha, "enter\n");
1715
1716         if ((mr->type != QLNXR_MR_DMA) && (mr->type != QLNXR_MR_FRMR))
1717                 qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
1718
1719         /* it could be user registered memory. */
1720         if (mr->umem)
1721                 ib_umem_release(mr->umem);
1722
1723         kfree(mr->pages);
1724
1725         kfree(mr);
1726
1727         QL_DPRINT12(ha, "exit\n");
1728         return rc;
1729 }
1730
1731 static int
1732 qlnxr_copy_cq_uresp(struct qlnxr_dev *dev,
1733         struct qlnxr_cq *cq, struct ib_udata *udata)
1734 {
1735         struct qlnxr_create_cq_uresp    uresp;
1736         int                             rc;
1737         qlnx_host_t                     *ha;
1738
1739         ha = dev->ha;
1740
1741         QL_DPRINT12(ha, "enter\n");
1742
1743         memset(&uresp, 0, sizeof(uresp));
1744
1745         uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
1746         uresp.icid = cq->icid;
1747
1748         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1749
1750         if (rc) {
1751                 QL_DPRINT12(ha, "ib_copy_to_udata error cqid=0x%x[%d]\n",
1752                         cq->icid, rc);
1753         }
1754
1755         QL_DPRINT12(ha, "exit [%d]\n", rc);
1756         return rc;
1757 }
1758
1759 static void
1760 consume_cqe(struct qlnxr_cq *cq)
1761 {
1762
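             /* consuming the last element wraps the chain, so flip the expected toggle bit */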
1763         if (cq->latest_cqe == cq->toggle_cqe)
1764                 cq->pbl_toggle ^= RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK;
1765
1766         cq->latest_cqe = ecore_chain_consume(&cq->pbl);
1767 }
1768
1769 static inline int
1770 qlnxr_align_cq_entries(int entries)
1771 {
1772         u64 size, aligned_size;
1773
1774         /* We allocate an extra entry that we don't report to the FW.
1775          * Why?
1776          * The CQE size is 32 bytes but the FW writes in chunks of 64 bytes
1777          * (for performance purposes). Allocating an extra entry and telling
1778          * the FW we have less prevents overwriting the first entry in case of
1779          * a wrap i.e. when the FW writes the last entry and the application
1780          * hasn't read the first one.
1781          */
1782         size = (entries + 1) * QLNXR_CQE_SIZE;
1783
1784         /* We align to PAGE_SIZE.
1785          * Why?
1786          * Since the CQ is going to be mapped and the mapping is anyhow in whole
1787          * kernel pages we benefit from the possibly extra CQEs.
1788          */
1789         aligned_size = ALIGN(size, PAGE_SIZE);
1790
1791         /* note: for CQs created in user space the result of this function
1792          * should match the size mapped in user space
1793          */
1794         return (aligned_size / QLNXR_CQE_SIZE);
1795 }
1796
1797 static inline int
1798 qlnxr_init_user_queue(struct ib_ucontext *ib_ctx, struct qlnxr_dev *dev,
1799         struct qlnxr_userq *q, u64 buf_addr, size_t buf_len,
1800         int access, int dmasync, int alloc_and_init)
1801 {
1802         int             page_cnt;
1803         int             rc;
1804         qlnx_host_t     *ha;
1805
1806         ha = dev->ha;
1807
1808         QL_DPRINT12(ha, "enter\n");
1809
1810         q->buf_addr = buf_addr;
1811         q->buf_len = buf_len;
1812
1813         QL_DPRINT12(ha, "buf_addr : %llx, buf_len : %x, access : %x"
1814               " dmasync : %x\n", q->buf_addr, q->buf_len,
1815                 access, dmasync);       
1816
1817         q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
1818
1819         if (IS_ERR(q->umem)) {
1820                 QL_DPRINT11(ha, "ib_umem_get failed [%lx]\n", PTR_ERR(q->umem));
1821                 return PTR_ERR(q->umem);
1822         }
1823
1824         page_cnt = ib_umem_page_count(q->umem);
1825         rc = qlnxr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt,
1826                                   0 /* SQ and RQ don't support dual layer pbl.
1827                                      * CQ may, but this is yet uncoded.
1828                                      */);
1829         if (rc) {
1830                 QL_DPRINT11(ha, "qlnxr_prepare_pbl_tbl failed [%d]\n", rc);
1831                 goto err;
1832         }
1833
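             /* when alloc_and_init is set the PBL is allocated and populated now;
              * otherwise only a placeholder entry is allocated and the PBL is
              * filled in later (e.g. qlnxr_iwarp_populate_user_qp() for iWARP QPs)
              */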
1834         if (alloc_and_init) {
1835                 q->pbl_tbl = qlnxr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
1836
1837                 if (!q->pbl_tbl) {
1838                         QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl failed\n");
1839                         rc = -ENOMEM;
1840                         goto err;
1841                 }
1842
1843                 qlnxr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
1844         } else {
1845                 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
1846
1847                 if (!q->pbl_tbl) {
1848                         QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl failed\n");
1849                         rc = -ENOMEM;
1850                         goto err;
1851                 }
1852         }
1853
1854         QL_DPRINT12(ha, "exit\n");
1855         return 0;
1856
1857 err:
1858         ib_umem_release(q->umem);
1859         q->umem = NULL;
1860
1861         QL_DPRINT12(ha, "exit [%d]\n", rc);
1862         return rc;
1863 }
1864
1865 #if __FreeBSD_version >= 1102000
1866
1867 struct ib_cq *
1868 qlnxr_create_cq(struct ib_device *ibdev,
1869         const struct ib_cq_init_attr *attr,
1870         struct ib_ucontext *ib_ctx,
1871         struct ib_udata *udata)
1872
1873 #else 
1874
1875 #if __FreeBSD_version >= 1100000
1876
1877 struct ib_cq *
1878 qlnxr_create_cq(struct ib_device *ibdev,
1879         struct ib_cq_init_attr *attr,
1880         struct ib_ucontext *ib_ctx,
1881         struct ib_udata *udata)
1882
1883 #else
1884
1885 struct ib_cq *
1886 qlnxr_create_cq(struct ib_device *ibdev,
1887         int entries,
1888         int vector,
1889         struct ib_ucontext *ib_ctx,
1890         struct ib_udata *udata)
1891 #endif /* #if __FreeBSD_version >= 1100000 */
1892
1893 #endif /* #if __FreeBSD_version >= 1102000 */
1894 {
1895         struct qlnxr_ucontext                   *ctx;
1896         struct ecore_rdma_destroy_cq_out_params destroy_oparams;
1897         struct ecore_rdma_destroy_cq_in_params  destroy_iparams;
1898         struct qlnxr_dev                        *dev;
1899         struct ecore_rdma_create_cq_in_params   params;
1900         struct qlnxr_create_cq_ureq             ureq;
1901
1902 #if __FreeBSD_version >= 1100000
1903         int                                     vector = attr->comp_vector;
1904         int                                     entries = attr->cqe;
1905 #endif
1906         struct qlnxr_cq                         *cq;
1907         int                                     chain_entries, rc, page_cnt;
1908         u64                                     pbl_ptr;
1909         u16                                     icid;
1910         qlnx_host_t                             *ha;
1911
1912         dev = get_qlnxr_dev(ibdev);
1913         ha = dev->ha;
1914
1915         QL_DPRINT12(ha, "called from %s. entries = %d, "
1916                 "vector = %d\n",
1917                 (udata ? "User Lib" : "Kernel"), entries, vector);
1918
1919         memset(&params, 0, sizeof(struct ecore_rdma_create_cq_in_params));
1920         memset(&destroy_iparams, 0, sizeof(struct ecore_rdma_destroy_cq_in_params));
1921         memset(&destroy_oparams, 0, sizeof(struct ecore_rdma_destroy_cq_out_params));
1922
1923         if (entries > QLNXR_MAX_CQES) {
1924                 QL_DPRINT11(ha,
1925                         "the number of entries %d is too high. "
1926                         "Must be equal or below %d.\n",
1927                         entries, QLNXR_MAX_CQES);
1928                 return ERR_PTR(-EINVAL);
1929         }
1930         chain_entries = qlnxr_align_cq_entries(entries);
1931         chain_entries = min_t(int, chain_entries, QLNXR_MAX_CQES);
1932
1933         cq = qlnx_zalloc((sizeof(struct qlnxr_cq)));
1934
1935         if (!cq)
1936                 return ERR_PTR(-ENOMEM);
1937
1938         if (udata) {
1939                 memset(&ureq, 0, sizeof(ureq));
1940
1941                 if (ib_copy_from_udata(&ureq, udata,
1942                         min(sizeof(ureq), udata->inlen))) {
1943                         QL_DPRINT11(ha, "ib_copy_from_udata failed\n");
1944                         goto err0;
1945                 }
1946
1947                 if (!ureq.len) {
1948                         QL_DPRINT11(ha, "ureq.len == 0\n");
1949                         goto err0;
1950                 }
1951
1952                 cq->cq_type = QLNXR_CQ_TYPE_USER;
1953
1954                 rc = qlnxr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
1955                                      ureq.len, IB_ACCESS_LOCAL_WRITE, 1, 1);
                     if (rc)
                             goto err0;
1956
1957                 pbl_ptr = cq->q.pbl_tbl->pa;
1958                 page_cnt = cq->q.pbl_info.num_pbes;
1959                 cq->ibcq.cqe = chain_entries;
1960         } else {
1961                 cq->cq_type = QLNXR_CQ_TYPE_KERNEL;
1962
1963                 rc = ecore_chain_alloc(&dev->ha->cdev,
1964                            ECORE_CHAIN_USE_TO_CONSUME,
1965                            ECORE_CHAIN_MODE_PBL,
1966                            ECORE_CHAIN_CNT_TYPE_U32,
1967                            chain_entries,
1968                            sizeof(union roce_cqe),
1969                            &cq->pbl, NULL);
1970
1971                 if (rc)
1972                         goto err1;
1973
1974                 page_cnt = ecore_chain_get_page_cnt(&cq->pbl);
1975                 pbl_ptr = ecore_chain_get_pbl_phys(&cq->pbl);
1976                 cq->ibcq.cqe = cq->pbl.capacity;
1977         }
1978
1979         params.cq_handle_hi = upper_32_bits((uintptr_t)cq);
1980         params.cq_handle_lo = lower_32_bits((uintptr_t)cq);
1981         params.cnq_id = vector;
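             /* report one entry less than allocated; the spare CQE keeps the FW
              * from overwriting an unread entry on wrap (see qlnxr_align_cq_entries())
              */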
1982         params.cq_size = chain_entries - 1;
1983         params.pbl_num_pages = page_cnt;
1984         params.pbl_ptr = pbl_ptr;
1985         params.pbl_two_level = 0;
1986
1987         if (ib_ctx != NULL) {
1988                 ctx = get_qlnxr_ucontext(ib_ctx);
1989                 params.dpi = ctx->dpi;
1990         } else {
1991                 params.dpi = dev->dpi;
1992         }
1993
1994         rc = ecore_rdma_create_cq(dev->rdma_ctx, &params, &icid);
1995         if (rc)
1996                 goto err2;
1997
1998         cq->icid = icid;
1999         cq->sig = QLNXR_CQ_MAGIC_NUMBER;
2000         spin_lock_init(&cq->cq_lock);
2001
2002         if (ib_ctx) {
2003                 rc = qlnxr_copy_cq_uresp(dev, cq, udata);
2004                 if (rc)
2005                         goto err3;
2006         } else {
2007                 /* Generate doorbell address.
2008                  * Configure bits 3-9 with DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT.
2009                  * TODO: consider moving to device scope as it is a function of
2010                  *       the device.
2011                  * TODO: add ifdef if plan to support 16 bit.
2012                  */
2013                 cq->db_addr = dev->db_addr +
2014                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
2015                 cq->db.data.icid = cq->icid;
2016                 cq->db.data.params = DB_AGG_CMD_SET <<
2017                                      RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
2018
2019                 /* point to the very last element, passing it we will toggle */
2020                 cq->toggle_cqe = ecore_chain_get_last_elem(&cq->pbl);
2021                 cq->pbl_toggle = RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK;
2022
2023                 /* must be different from pbl_toggle */
2024                 cq->latest_cqe = NULL;
2025                 consume_cqe(cq);
2026                 cq->cq_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
2027         }
2028
2029         QL_DPRINT12(ha, "exit icid = 0x%0x, addr = %p,"
2030                 " number of entries = 0x%x\n",
2031                 cq->icid, cq, params.cq_size);
2032         QL_DPRINT12(ha,"cq_addr = %p\n", cq);
2033         return &cq->ibcq;
2034
2035 err3:
2036         destroy_iparams.icid = cq->icid;
2037         ecore_rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams, &destroy_oparams);
2038 err2:
2039         if (udata)
2040                 qlnxr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
2041         else
2042                 ecore_chain_free(&dev->ha->cdev, &cq->pbl);
2043 err1:
2044         if (udata)
2045                 ib_umem_release(cq->q.umem);
2046 err0:
2047         kfree(cq);
2048
2049         QL_DPRINT12(ha, "exit error\n");
2050
2051         return ERR_PTR(-EINVAL);
2052 }
2053
2054 int qlnxr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
2055 {
2056         int                     status = 0;
2057         struct qlnxr_dev        *dev = get_qlnxr_dev((ibcq->device));
2058         qlnx_host_t             *ha;
2059
2060         ha = dev->ha;
2061
2062         QL_DPRINT12(ha, "enter/exit\n");
2063
2064         return status;
2065 }
2066
2067 int
2068 qlnxr_destroy_cq(struct ib_cq *ibcq)
2069 {
2070         struct qlnxr_dev                        *dev = get_qlnxr_dev((ibcq->device));
2071         struct ecore_rdma_destroy_cq_out_params oparams;
2072         struct ecore_rdma_destroy_cq_in_params  iparams;
2073         struct qlnxr_cq                         *cq = get_qlnxr_cq(ibcq);
2074         int                                     rc = 0;
2075         qlnx_host_t                             *ha;
2076
2077         ha = dev->ha;
2078
2079         QL_DPRINT12(ha, "enter cq_id = %d\n", cq->icid);
2080
2081         cq->destroyed = 1;
2082
2083         /* TODO: Synchronize the IRQ of the CNQ the CQ belongs to, to validate
2084          * that all completions with notification are dealt with. The rest
2085          * of the completions are not interesting.
2086          */
2087
2088         /* GSI CQs are handled by the driver, so they don't exist in the FW */
2089
2090         if (cq->cq_type != QLNXR_CQ_TYPE_GSI) {
2091
2092                 iparams.icid = cq->icid;
2093
2094                 rc = ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
2095
2096                 if (rc) {
2097                         QL_DPRINT12(ha, "ecore_rdma_destroy_cq failed cq_id = %d\n",
2098                                 cq->icid);
2099                         return rc;
2100                 }
2101
2102                 QL_DPRINT12(ha, "free cq->pbl cq_id = %d\n", cq->icid);
2103                 ecore_chain_free(&dev->ha->cdev, &cq->pbl);
2104         }
2105
2106         if (ibcq->uobject && ibcq->uobject->context) {
2107                 qlnxr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
2108                 ib_umem_release(cq->q.umem);
2109         }
2110
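             /* invalidate the magic signature before freeing so stale references can be caught */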
2111         cq->sig = ~cq->sig;
2112
2113         kfree(cq);
2114
2115         QL_DPRINT12(ha, "exit cq_id = %d\n", cq->icid);
2116
2117         return rc;
2118 }
2119
2120 static int
2121 qlnxr_check_qp_attrs(struct ib_pd *ibpd,
2122         struct qlnxr_dev *dev,
2123         struct ib_qp_init_attr *attrs,
2124         struct ib_udata *udata)
2125 {
2126         struct ecore_rdma_device        *qattr;
2127         qlnx_host_t                     *ha;
2128
2129         qattr = ecore_rdma_query_device(dev->rdma_ctx);
2130         ha = dev->ha;
2131
2132         QL_DPRINT12(ha, "enter\n");
2133
2134         QL_DPRINT12(ha, "attrs->sq_sig_type = %d\n", attrs->sq_sig_type);
2135         QL_DPRINT12(ha, "attrs->qp_type = %d\n", attrs->qp_type);
2136         QL_DPRINT12(ha, "attrs->create_flags = %d\n", attrs->create_flags);
2137
2138 #if __FreeBSD_version < 1102000
2139         QL_DPRINT12(ha, "attrs->qpg_type = %d\n", attrs->qpg_type);
2140 #endif
2141
2142         QL_DPRINT12(ha, "attrs->port_num = %d\n", attrs->port_num);
2143         QL_DPRINT12(ha, "attrs->cap.max_send_wr = 0x%x\n", attrs->cap.max_send_wr);
2144         QL_DPRINT12(ha, "attrs->cap.max_recv_wr = 0x%x\n", attrs->cap.max_recv_wr);
2145         QL_DPRINT12(ha, "attrs->cap.max_send_sge = 0x%x\n", attrs->cap.max_send_sge);
2146         QL_DPRINT12(ha, "attrs->cap.max_recv_sge = 0x%x\n", attrs->cap.max_recv_sge);
2147         QL_DPRINT12(ha, "attrs->cap.max_inline_data = 0x%x\n",
2148                 attrs->cap.max_inline_data);
2149
2150 #if __FreeBSD_version < 1102000
2151         QL_DPRINT12(ha, "attrs->cap.qpg_tss_mask_sz = 0x%x\n",
2152                 attrs->cap.qpg_tss_mask_sz);
2153 #endif
2154
2155         QL_DPRINT12(ha, "\n\nqattr->vendor_id = 0x%x\n", qattr->vendor_id);
2156         QL_DPRINT12(ha, "qattr->vendor_part_id = 0x%x\n", qattr->vendor_part_id);
2157         QL_DPRINT12(ha, "qattr->hw_ver = 0x%x\n", qattr->hw_ver);
2158         QL_DPRINT12(ha, "qattr->fw_ver = %p\n", (void *)qattr->fw_ver);
2159         QL_DPRINT12(ha, "qattr->node_guid = %p\n", (void *)qattr->node_guid);
2160         QL_DPRINT12(ha, "qattr->sys_image_guid = %p\n",
2161                 (void *)qattr->sys_image_guid);
2162         QL_DPRINT12(ha, "qattr->max_cnq = 0x%x\n", qattr->max_cnq);
2163         QL_DPRINT12(ha, "qattr->max_sge = 0x%x\n", qattr->max_sge);
2164         QL_DPRINT12(ha, "qattr->max_srq_sge = 0x%x\n", qattr->max_srq_sge);
2165         QL_DPRINT12(ha, "qattr->max_inline = 0x%x\n", qattr->max_inline);
2166         QL_DPRINT12(ha, "qattr->max_wqe = 0x%x\n", qattr->max_wqe);
2167         QL_DPRINT12(ha, "qattr->max_srq_wqe = 0x%x\n", qattr->max_srq_wqe);
2168         QL_DPRINT12(ha, "qattr->max_qp_resp_rd_atomic_resc = 0x%x\n",
2169                 qattr->max_qp_resp_rd_atomic_resc);
2170         QL_DPRINT12(ha, "qattr->max_qp_req_rd_atomic_resc = 0x%x\n",
2171                 qattr->max_qp_req_rd_atomic_resc);
2172         QL_DPRINT12(ha, "qattr->max_dev_resp_rd_atomic_resc = 0x%x\n",
2173                 qattr->max_dev_resp_rd_atomic_resc);
2174         QL_DPRINT12(ha, "qattr->max_cq = 0x%x\n", qattr->max_cq);
2175         QL_DPRINT12(ha, "qattr->max_qp = 0x%x\n", qattr->max_qp);
2176         QL_DPRINT12(ha, "qattr->max_srq = 0x%x\n", qattr->max_srq);
2177         QL_DPRINT12(ha, "qattr->max_mr = 0x%x\n", qattr->max_mr);
2178         QL_DPRINT12(ha, "qattr->max_mr_size = %p\n", (void *)qattr->max_mr_size);
2179         QL_DPRINT12(ha, "qattr->max_cqe = 0x%x\n", qattr->max_cqe);
2180         QL_DPRINT12(ha, "qattr->max_mw = 0x%x\n", qattr->max_mw);
2181         QL_DPRINT12(ha, "qattr->max_fmr = 0x%x\n", qattr->max_fmr);
2182         QL_DPRINT12(ha, "qattr->max_mr_mw_fmr_pbl = 0x%x\n",
2183                 qattr->max_mr_mw_fmr_pbl);
2184         QL_DPRINT12(ha, "qattr->max_mr_mw_fmr_size = %p\n",
2185                 (void *)qattr->max_mr_mw_fmr_size);
2186         QL_DPRINT12(ha, "qattr->max_pd = 0x%x\n", qattr->max_pd);
2187         QL_DPRINT12(ha, "qattr->max_ah = 0x%x\n", qattr->max_ah);
2188         QL_DPRINT12(ha, "qattr->max_pkey = 0x%x\n", qattr->max_pkey);
2189         QL_DPRINT12(ha, "qattr->max_srq_wr = 0x%x\n", qattr->max_srq_wr);
2190         QL_DPRINT12(ha, "qattr->max_stats_queues = 0x%x\n",
2191                 qattr->max_stats_queues);
2192         //QL_DPRINT12(ha, "qattr->dev_caps = 0x%x\n", qattr->dev_caps);
2193         QL_DPRINT12(ha, "qattr->page_size_caps = %p\n",
2194                 (void *)qattr->page_size_caps);
2195         QL_DPRINT12(ha, "qattr->dev_ack_delay = 0x%x\n", qattr->dev_ack_delay);
2196         QL_DPRINT12(ha, "qattr->reserved_lkey = 0x%x\n", qattr->reserved_lkey);
2197         QL_DPRINT12(ha, "qattr->bad_pkey_counter = 0x%x\n",
2198                 qattr->bad_pkey_counter);
2199
2200         if ((attrs->qp_type == IB_QPT_GSI) && udata) {
2201                 QL_DPRINT12(ha, "unexpected udata when creating GSI QP\n");
2202                 return -EINVAL;
2203         }
2204
2205         if (udata && !(ibpd->uobject && ibpd->uobject->context)) {
2206                 QL_DPRINT12(ha, "called from user without context\n");
2207                 return -EINVAL;
2208         }
2209
2210         /* QP0... attrs->qp_type == IB_QPT_GSI */
2211         if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
2212                 QL_DPRINT12(ha, "unsupported qp type=0x%x requested\n", 
2213                            attrs->qp_type);
2214                 return -EINVAL;
2215         }
2216         if (attrs->qp_type == IB_QPT_GSI && attrs->srq) {
2217                 QL_DPRINT12(ha, "cannot create GSI qp with SRQ\n");
2218                 return -EINVAL;
2219         }
2220         /* Skip the check for QP1 to support CM size of 128 */
2221         if (attrs->cap.max_send_wr > qattr->max_wqe) {
2222                 QL_DPRINT12(ha, "cannot create a SQ with %d elements "
2223                         " (max_send_wr=0x%x)\n",
2224                         attrs->cap.max_send_wr, qattr->max_wqe);
2225                 return -EINVAL;
2226         }
2227         if (!attrs->srq && (attrs->cap.max_recv_wr > qattr->max_wqe)) {
2228                 QL_DPRINT12(ha, "cannot create a RQ with %d elements"
2229                         " (max_recv_wr=0x%x)\n",
2230                         attrs->cap.max_recv_wr, qattr->max_wqe);
2231                 return -EINVAL;
2232         }
2233         if (attrs->cap.max_inline_data > qattr->max_inline) {
2234                 QL_DPRINT12(ha,
2235                         "unsupported inline data size=0x%x "
2236                         "requested (max_inline=0x%x)\n",
2237                         attrs->cap.max_inline_data, qattr->max_inline);
2238                 return -EINVAL;
2239         }
2240         if (attrs->cap.max_send_sge > qattr->max_sge) {
2241                 QL_DPRINT12(ha,
2242                         "unsupported send_sge=0x%x "
2243                         "requested (max_send_sge=0x%x)\n",
2244                         attrs->cap.max_send_sge, qattr->max_sge);
2245                 return -EINVAL;
2246         }
2247         if (attrs->cap.max_recv_sge > qattr->max_sge) {
2248                 QL_DPRINT12(ha,
2249                         "unsupported recv_sge=0x%x requested "
2250                         " (max_recv_sge=0x%x)\n",
2251                         attrs->cap.max_recv_sge, qattr->max_sge);
2252                 return -EINVAL;
2253         }
2254         /* unprivileged user space cannot create special QP */
2255         if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
2256                 QL_DPRINT12(ha,
2257                         "userspace can't create special QPs of type=0x%x\n",
2258                         attrs->qp_type);
2259                 return -EINVAL;
2260         }
2261         /* allow creating only one GSI type of QP */
2262         if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
2263                 QL_DPRINT12(ha,
2264                         "create qp: GSI special QPs already created.\n");
2265                 return -EINVAL;
2266         }
2267
2268         /* verify consumer QPs are not trying to use GSI QP's CQ */
2269         if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
2270                 struct qlnxr_cq *send_cq = get_qlnxr_cq(attrs->send_cq);
2271                 struct qlnxr_cq *recv_cq = get_qlnxr_cq(attrs->recv_cq);
2272
2273                 if ((send_cq->cq_type == QLNXR_CQ_TYPE_GSI) ||
2274                     (recv_cq->cq_type == QLNXR_CQ_TYPE_GSI)) {
2275                         QL_DPRINT11(ha, "consumer QP cannot use GSI CQs.\n");
2276                         return -EINVAL;
2277                 }
2278         }
2279         QL_DPRINT12(ha, "exit\n");
2280         return 0;
2281 }
2282
2283 static int
2284 qlnxr_copy_srq_uresp(struct qlnxr_dev *dev,
2285         struct qlnxr_srq *srq,
2286         struct ib_udata *udata)
2287 {
2288         struct qlnxr_create_srq_uresp   uresp;
2289         qlnx_host_t                     *ha;
2290         int                             rc;
2291
2292         ha = dev->ha;
2293
2294         QL_DPRINT12(ha, "enter\n");
2295
2296         memset(&uresp, 0, sizeof(uresp));
2297
2298         uresp.srq_id = srq->srq_id;
2299
2300         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2301
2302         QL_DPRINT12(ha, "exit [%d]\n", rc);
2303         return rc;
2304 }
2305
2306 static void
2307 qlnxr_copy_rq_uresp(struct qlnxr_dev *dev,
2308         struct qlnxr_create_qp_uresp *uresp,
2309         struct qlnxr_qp *qp)
2310 {
2311         qlnx_host_t     *ha;
2312
2313         ha = dev->ha;
2314
2315         /* Return if QP is associated with SRQ instead of RQ */
2316         QL_DPRINT12(ha, "enter qp->srq = %p\n", qp->srq);
2317
2318         if (qp->srq)
2319                 return;
2320
2321         /* iWARP requires two doorbells per RQ. */
2322         if (QLNX_IS_IWARP(dev)) {
2323
2324                 uresp->rq_db_offset =
2325                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2326                 uresp->rq_db2_offset =
2327                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2328
2329                 QL_DPRINT12(ha, "uresp->rq_db_offset = 0x%x "
2330                         "uresp->rq_db2_offset = 0x%x\n",
2331                         uresp->rq_db_offset, uresp->rq_db2_offset);
2332         } else {
2333                 uresp->rq_db_offset =
2334                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
2335         }
2336         uresp->rq_icid = qp->icid;
2337
2338         QL_DPRINT12(ha, "exit\n");
2339         return;
2340 }
2341
2342 static void
2343 qlnxr_copy_sq_uresp(struct qlnxr_dev *dev,
2344         struct qlnxr_create_qp_uresp *uresp,
2345         struct qlnxr_qp *qp)
2346 {
2347         qlnx_host_t     *ha;
2348
2349         ha = dev->ha;
2350
2351         QL_DPRINT12(ha, "enter\n");
2352
2353         uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2354
2355         /* iWARP uses the same icid for the RQ and SQ; RoCE places the SQ at icid + 1 */
2356         if (QLNX_IS_IWARP(dev)) {
2357                 uresp->sq_icid = qp->icid;
2358                 QL_DPRINT12(ha, "uresp->sq_icid = 0x%x\n", uresp->sq_icid);
2359         } else
2360                 uresp->sq_icid = qp->icid + 1;
2361
2362         QL_DPRINT12(ha, "exit\n");
2363         return;
2364 }
2365
2366 static int
2367 qlnxr_copy_qp_uresp(struct qlnxr_dev *dev,
2368         struct qlnxr_qp *qp,
2369         struct ib_udata *udata)
2370 {
2371         int                             rc;
2372         struct qlnxr_create_qp_uresp    uresp;
2373         qlnx_host_t                     *ha;
2374
2375         ha = dev->ha;
2376
2377         QL_DPRINT12(ha, "enter qp->icid =0x%x\n", qp->icid);
2378
2379         memset(&uresp, 0, sizeof(uresp));
2380         qlnxr_copy_sq_uresp(dev, &uresp, qp);
2381         qlnxr_copy_rq_uresp(dev, &uresp, qp);
2382
2383         uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
2384         uresp.qp_id = qp->qp_id;
2385
2386         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2387
2388         QL_DPRINT12(ha, "exit [%d]\n", rc);
2389         return rc;
2390 }
2391
2392
2393 static void
2394 qlnxr_set_common_qp_params(struct qlnxr_dev *dev,
2395         struct qlnxr_qp *qp,
2396         struct qlnxr_pd *pd,
2397         struct ib_qp_init_attr *attrs)
2398 {
2399         qlnx_host_t                     *ha;
2400
2401         ha = dev->ha;
2402
2403         QL_DPRINT12(ha, "enter\n");
2404
2405         spin_lock_init(&qp->q_lock);
2406
2407         atomic_set(&qp->refcnt, 1);
2408         qp->pd = pd;
2409         qp->sig = QLNXR_QP_MAGIC_NUMBER;
2410         qp->qp_type = attrs->qp_type;
2411         qp->max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2412         qp->sq.max_sges = attrs->cap.max_send_sge;
2413         qp->state = ECORE_ROCE_QP_STATE_RESET;
2414         qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
2415         qp->sq_cq = get_qlnxr_cq(attrs->send_cq);
2416         qp->rq_cq = get_qlnxr_cq(attrs->recv_cq);
2417         qp->dev = dev;
2418
2419         if (!attrs->srq) {
2420                 /* QP is associated with RQ instead of SRQ */
2421                 qp->rq.max_sges = attrs->cap.max_recv_sge;
2422                 QL_DPRINT12(ha, "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
2423                         qp->rq.max_sges, qp->rq_cq->icid);
2424         } else {
2425                 qp->srq = get_qlnxr_srq(attrs->srq);
2426         }
2427
2428         QL_DPRINT12(ha,
2429                 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d,"
2430                 " state = %d, signaled = %d, use_srq=%d\n",
2431                 pd->pd_id, qp->qp_type, qp->max_inline_data,
2432                 qp->state, qp->signaled, ((attrs->srq) ? 1 : 0));
2433         QL_DPRINT12(ha, "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
2434                 qp->sq.max_sges, qp->sq_cq->icid);
2435         return;
2436 }
2437
2438 static int
2439 qlnxr_check_srq_params(struct ib_pd *ibpd,
2440         struct qlnxr_dev *dev,
2441         struct ib_srq_init_attr *attrs)
2442 {
2443         struct ecore_rdma_device *qattr;
2444         qlnx_host_t             *ha;
2445
2446         ha = dev->ha;
2447         qattr = ecore_rdma_query_device(dev->rdma_ctx);
2448
2449         QL_DPRINT12(ha, "enter\n");
2450
2451         if (attrs->attr.max_wr > qattr->max_srq_wqe) {
2452                 QL_DPRINT12(ha, "unsupported srq_wr=0x%x"
2453                         " requested (max_srq_wr=0x%x)\n",
2454                         attrs->attr.max_wr, qattr->max_srq_wr);
2455                 return -EINVAL;
2456         }
2457
2458         if (attrs->attr.max_sge > qattr->max_sge) {
2459                 QL_DPRINT12(ha,
2460                         "unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
2461                         attrs->attr.max_sge, qattr->max_sge);
2462                 return -EINVAL;
2463         }
2464
2465         if (attrs->attr.srq_limit > attrs->attr.max_wr) {
2466                 QL_DPRINT12(ha,
2467                        "unsupported srq_limit=0x%x requested"
2468                         " (max_srq_limit=0x%x)\n",
2469                         attrs->attr.srq_limit, attrs->attr.max_wr);
2470                 return -EINVAL;
2471         }
2472
2473         QL_DPRINT12(ha, "exit\n");
2474         return 0;
2475 }
2476
2477
2478 static void
2479 qlnxr_free_srq_user_params(struct qlnxr_srq *srq)
2480 {
2481         struct qlnxr_dev        *dev = srq->dev;
2482         qlnx_host_t             *ha;
2483
2484         ha = dev->ha;
2485
2486         QL_DPRINT12(ha, "enter\n");
2487
2488         qlnxr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
2489         ib_umem_release(srq->usrq.umem);
2490         ib_umem_release(srq->prod_umem);
2491
2492         QL_DPRINT12(ha, "exit\n");
2493         return;
2494 }
2495
2496 static void
2497 qlnxr_free_srq_kernel_params(struct qlnxr_srq *srq)
2498 {
2499         struct qlnxr_srq_hwq_info *hw_srq  = &srq->hw_srq;
2500         struct qlnxr_dev        *dev = srq->dev;
2501         qlnx_host_t             *ha;
2502
2503         ha = dev->ha;
2504
2505         QL_DPRINT12(ha, "enter\n");
2506
2507         ecore_chain_free(dev->cdev, &hw_srq->pbl);
2508
2509         qlnx_dma_free_coherent(&dev->cdev,
2510                 hw_srq->virt_prod_pair_addr,
2511                 hw_srq->phy_prod_pair_addr,
2512                 sizeof(struct rdma_srq_producers));
2513
2514         QL_DPRINT12(ha, "exit\n");
2515
2516         return;
2517 }
2518
2519 static int
2520 qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
2521         struct qlnxr_srq *srq,
2522         struct qlnxr_create_srq_ureq *ureq,
2523         int access, int dmasync)
2524 {
2525 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
2526         struct ib_umem_chunk    *chunk;
2527 #endif
2528         struct scatterlist      *sg;
2529         int                     rc;
2530         struct qlnxr_dev        *dev = srq->dev;
2531         qlnx_host_t             *ha;
2532
2533         ha = dev->ha;
2534
2535         QL_DPRINT12(ha, "enter\n");
2536
2537         rc = qlnxr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
2538                                   ureq->srq_len, access, dmasync, 1);
2539         if (rc)
2540                 return rc;
2541
2542         srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
2543                                      sizeof(struct rdma_srq_producers),
2544                                      access, dmasync);
2545         if (IS_ERR(srq->prod_umem)) {
2546
2547                 qlnxr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
2548                 ib_umem_release(srq->usrq.umem);
2549
2550                 QL_DPRINT12(ha, "ib_umem_get failed for producer [%lx]\n",
2551                         PTR_ERR(srq->prod_umem));
2552
2553                 return PTR_ERR(srq->prod_umem);
2554         }
2555
2556 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
2557         chunk = container_of((&srq->prod_umem->chunk_list)->next,
2558                              typeof(*chunk), list);
2559         sg = &chunk->page_list[0];
2560 #else
2561         sg = srq->prod_umem->sg_head.sgl;
2562 #endif
2563         srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
2564
2565         QL_DPRINT12(ha, "exit\n");
2566         return 0;
2567 }
2568
2569
2570 static int
2571 qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
2572         struct qlnxr_dev *dev,
2573         struct ib_srq_init_attr *init_attr)
2574 {
2575         struct qlnxr_srq_hwq_info       *hw_srq  = &srq->hw_srq;
2576         dma_addr_t                      phy_prod_pair_addr;
2577         u32                             num_elems, max_wr;
2578         void                            *va;
2579         int                             rc;
2580         qlnx_host_t                     *ha;
2581
2582         ha = dev->ha;
2583
2584         QL_DPRINT12(ha, "enter\n");
2585
2586         va = qlnx_dma_alloc_coherent(&dev->cdev,
2587                         &phy_prod_pair_addr,
2588                         sizeof(struct rdma_srq_producers));
2589         if (!va) {
2590                 QL_DPRINT11(ha, "qlnx_dma_alloc_coherent failed for producer\n");
2591                 return -ENOMEM;
2592         }
2593
2594         hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
2595         hw_srq->virt_prod_pair_addr = va;
2596
2597         max_wr = init_attr->attr.max_wr;
2598
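             /* size the chain for the worst case of every WR using the maximum WQE size */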
2599         num_elems = max_wr * RDMA_MAX_SRQ_WQE_SIZE;
2600
2601         rc = ecore_chain_alloc(dev->cdev,
2602                    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2603                    ECORE_CHAIN_MODE_PBL,
2604                    ECORE_CHAIN_CNT_TYPE_U32,
2605                    num_elems,
2606                    ECORE_RDMA_SRQ_WQE_ELEM_SIZE,
2607                    &hw_srq->pbl, NULL);
2608
2609         if (rc) {
2610                 QL_DPRINT11(ha, "ecore_chain_alloc failed [%d]\n", rc);
2611                 goto err0;
2612         }
2613
2614         hw_srq->max_wr = max_wr;
2615         hw_srq->num_elems = num_elems;
2616         hw_srq->max_sges = RDMA_MAX_SGE_PER_SRQ;
2617
2618         QL_DPRINT12(ha, "exit\n");
2619         return 0;
2620
2621 err0:
2622         qlnx_dma_free_coherent(&dev->cdev, va, phy_prod_pair_addr,
2623                 sizeof(struct rdma_srq_producers));
2624
2625         QL_DPRINT12(ha, "exit [%d]\n", rc);
2626         return rc;
2627 }
2628
2629 static inline void
2630 qlnxr_init_common_qp_in_params(struct qlnxr_dev *dev,
2631         struct qlnxr_pd *pd,
2632         struct qlnxr_qp *qp,
2633         struct ib_qp_init_attr *attrs,
2634         bool fmr_and_reserved_lkey,
2635         struct ecore_rdma_create_qp_in_params *params)
2636 {
2637         qlnx_host_t     *ha;
2638
2639         ha = dev->ha;
2640
2641         QL_DPRINT12(ha, "enter\n");
2642
2643         /* QP handle to be written in an async event */
2644         params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
2645         params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
2646
2647         params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
2648         params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
2649         params->pd = pd->pd_id;
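             /* user QPs ring doorbells through the creating context's DPI, kernel QPs use the device DPI */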
2650         params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
2651         params->sq_cq_id = get_qlnxr_cq(attrs->send_cq)->icid;
2652         params->stats_queue = 0;
2653
2654         params->rq_cq_id = get_qlnxr_cq(attrs->recv_cq)->icid;
2655
2656         if (qp->srq) {
2657                 /* QP is associated with SRQ instead of RQ */
2658                 params->srq_id = qp->srq->srq_id;
2659                 params->use_srq = true;
2660                 QL_DPRINT11(ha, "exit srq_id = 0x%x use_srq = 0x%x\n",
2661                         params->srq_id, params->use_srq);
2662                 return;
2663         }
2664
2665         params->srq_id = 0;
2666         params->use_srq = false;
2667
2668         QL_DPRINT12(ha, "exit\n");
2669         return;
2670 }
2671
2672
2673 static inline void
2674 qlnxr_qp_user_print( struct qlnxr_dev *dev,
2675         struct qlnxr_qp *qp)
2676 {
2677         QL_DPRINT12((dev->ha), "qp=%p. sq_addr=0x%llx, sq_len=%zd, "
2678                 "rq_addr=0x%llx, rq_len=%zd\n",
2679                 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
2680                 qp->urq.buf_len);
2681         return;
2682 }
2683
2684 static int
2685 qlnxr_idr_add(struct qlnxr_dev *dev, void *ptr, u32 id)
2686 {
2687         u32             newid;
2688         int             rc;
2689         qlnx_host_t     *ha;
2690
2691         ha = dev->ha;
2692
2693         QL_DPRINT12(ha, "enter\n");
2694
2695         if (!QLNX_IS_IWARP(dev))
2696                 return 0;
2697
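             /* idr_get_new_above() returns -EAGAIN if the memory preallocated by
              * idr_pre_get() was exhausted, so preallocate again and retry
              */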
2698         do {
2699                 if (!idr_pre_get(&dev->qpidr, GFP_KERNEL)) {
2700                         QL_DPRINT11(ha, "idr_pre_get failed\n");
2701                         return -ENOMEM;
2702                 }
2703
2704                 mtx_lock(&dev->idr_lock);
2705
2706                 rc = idr_get_new_above(&dev->qpidr, ptr, id, &newid);
2707
2708                 mtx_unlock(&dev->idr_lock);
2709
2710         } while (rc == -EAGAIN);
2711
2712         QL_DPRINT12(ha, "exit [%d]\n", rc);
2713
2714         return rc;
2715 }
2716
2717 static void
2718 qlnxr_idr_remove(struct qlnxr_dev *dev, u32 id)
2719 {
2720         qlnx_host_t     *ha;
2721
2722         ha = dev->ha;
2723
2724         QL_DPRINT12(ha, "enter\n");
2725
2726         if (!QLNX_IS_IWARP(dev))
2727                 return;
2728
2729         mtx_lock(&dev->idr_lock);
2730         idr_remove(&dev->qpidr, id);
2731         mtx_unlock(&dev->idr_lock);
2732
2733         QL_DPRINT12(ha, "exit \n");
2734
2735         return;
2736 }
2737
2738 static inline void
2739 qlnxr_iwarp_populate_user_qp(struct qlnxr_dev *dev,
2740         struct qlnxr_qp *qp,
2741         struct ecore_rdma_create_qp_out_params *out_params)
2742 {
2743         qlnx_host_t     *ha;
2744
2745         ha = dev->ha;
2746
2747         QL_DPRINT12(ha, "enter\n");
2748
2749         qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
2750         qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
2751
2752         qlnxr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
2753                            &qp->usq.pbl_info);
2754
2755         if (qp->srq) {
2756                 QL_DPRINT11(ha, "qp->srq = %p\n", qp->srq);
2757                 return;
2758         }
2759
2760         qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
2761         qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
2762
2763         qlnxr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
2764                            &qp->urq.pbl_info);
2765
2766         QL_DPRINT12(ha, "exit\n");
2767         return;
2768 }
2769
2770 static int
2771 qlnxr_create_user_qp(struct qlnxr_dev *dev,
2772         struct qlnxr_qp *qp,
2773         struct ib_pd *ibpd,
2774         struct ib_udata *udata,
2775         struct ib_qp_init_attr *attrs)
2776 {
2777         struct ecore_rdma_destroy_qp_out_params d_out_params;
2778         struct ecore_rdma_create_qp_in_params in_params;
2779         struct ecore_rdma_create_qp_out_params out_params;
2780         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
2781         struct ib_ucontext *ib_ctx = NULL;
2782         struct qlnxr_ucontext *ctx = NULL;
2783         struct qlnxr_create_qp_ureq ureq;
2784         int alloc_and_init = QLNX_IS_ROCE(dev);
2785         int rc = -EINVAL;
2786         qlnx_host_t     *ha;
2787
2788         ha = dev->ha;
2789
2790         QL_DPRINT12(ha, "enter\n");
2791
2792         ib_ctx = ibpd->uobject->context;
2793         ctx = get_qlnxr_ucontext(ib_ctx);
2794
2795         memset(&ureq, 0, sizeof(ureq));
2796         rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
2797
2798         if (rc) {
2799                 QL_DPRINT11(ha, "ib_copy_from_udata failed [%d]\n", rc);
2800                 return rc;
2801         }
2802
2803         /* SQ - read access only (0), dma sync not required (0) */
2804         rc = qlnxr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
2805                                   ureq.sq_len, 0, 0,
2806                                   alloc_and_init);
2807         if (rc) {
2808                 QL_DPRINT11(ha, "qlnxr_init_user_queue failed [%d]\n", rc);
2809                 return rc;
2810         }
2811
2812         if (!qp->srq) {
2813                 /* RQ - read access only (0), dma sync not required (0) */
2814                 rc = qlnxr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
2815                                           ureq.rq_len, 0, 0,
2816                                           alloc_and_init);
2817
2818                 if (rc) {
2819                         QL_DPRINT11(ha, "qlnxr_init_user_queue failed [%d]\n", rc);
2820                         return rc;
2821                 }
2822         }
2823
2824         memset(&in_params, 0, sizeof(in_params));
2825         qlnxr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
2826         in_params.qp_handle_lo = ureq.qp_handle_lo;
2827         in_params.qp_handle_hi = ureq.qp_handle_hi;
2828         in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
2829         in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
2830
2831         if (!qp->srq) {
2832                 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
2833                 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
2834         }
2835
2836         qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, &in_params, &out_params);
2837
2838         if (!qp->ecore_qp) {
2839                 rc = -ENOMEM;
2840                 QL_DPRINT11(ha, "ecore_rdma_create_qp failed\n");
2841                 goto err1;
2842         }
2843
2844         if (QLNX_IS_IWARP(dev))
2845                 qlnxr_iwarp_populate_user_qp(dev, qp, &out_params);
2846
2847         qp->qp_id = out_params.qp_id;
2848         qp->icid = out_params.icid;
2849
2850         rc = qlnxr_copy_qp_uresp(dev, qp, udata);
2851
2852         if (rc) {
2853                 QL_DPRINT11(ha, "qlnxr_copy_qp_uresp failed\n");
2854                 goto err;
2855         }
2856
2857         qlnxr_qp_user_print(dev, qp);
2858
2859         QL_DPRINT12(ha, "exit\n");
2860         return 0;
2861 err:
2862         rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp, &d_out_params);
2863
2864         if (rc)
2865                 QL_DPRINT12(ha, "fatal fault\n");
2866
2867 err1:
2868         qlnxr_cleanup_user(dev, qp);
2869
2870         QL_DPRINT12(ha, "exit[%d]\n", rc);
2871         return rc;
2872 }
2873
2874 static void
2875 qlnxr_set_roce_db_info(struct qlnxr_dev *dev,
2876         struct qlnxr_qp *qp)
2877 {
2878         qlnx_host_t     *ha;
2879
2880         ha = dev->ha;
2881
2882         QL_DPRINT12(ha, "enter qp = %p qp->srq %p\n", qp, qp->srq);
2883
2884         qp->sq.db = dev->db_addr +
2885                 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2886         qp->sq.db_data.data.icid = qp->icid + 1;
2887
2888         if (!qp->srq) {
2889                 qp->rq.db = dev->db_addr +
2890                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
2891                 qp->rq.db_data.data.icid = qp->icid;
2892         }
2893
2894         QL_DPRINT12(ha, "exit\n");
2895         return;
2896 }
2897
2898 static void
2899 qlnxr_set_iwarp_db_info(struct qlnxr_dev *dev,
2900         struct qlnxr_qp *qp)
2901
2902 {
2903         qlnx_host_t     *ha;
2904
2905         ha = dev->ha;
2906
2907         QL_DPRINT12(ha, "enter qp = %p qp->srq %p\n", qp, qp->srq);
2908
2909         qp->sq.db = dev->db_addr +
2910                 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2911         qp->sq.db_data.data.icid = qp->icid;
2912
2913         if (!qp->srq) {
2914                 qp->rq.db = dev->db_addr +
2915                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2916                 qp->rq.db_data.data.icid = qp->icid;
2917
2918                 qp->rq.iwarp_db2 = dev->db_addr +
2919                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2920                 qp->rq.iwarp_db2_data.data.icid = qp->icid;
2921                 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
2922         }
2923
2924         QL_DPRINT12(ha,
2925                 "qp->sq.db = %p qp->sq.db_data.data.icid =0x%x\n"
2926                 "\t\t\tqp->rq.db = %p qp->rq.db_data.data.icid =0x%x\n"
2927                 "\t\t\tqp->rq.iwarp_db2 = %p qp->rq.iwarp_db2.data.icid =0x%x"
2928                 " qp->rq.iwarp_db2.data.prod_val =0x%x\n",
2929                 qp->sq.db, qp->sq.db_data.data.icid,
2930                 qp->rq.db, qp->rq.db_data.data.icid,
2931                 qp->rq.iwarp_db2, qp->rq.iwarp_db2_data.data.icid,
2932                 qp->rq.iwarp_db2_data.data.value);
2933
2934         QL_DPRINT12(ha, "exit\n");
2935         return;
2936 }
2937
2938 static int
2939 qlnxr_roce_create_kernel_qp(struct qlnxr_dev *dev,
2940         struct qlnxr_qp *qp,
2941         struct ecore_rdma_create_qp_in_params *in_params,
2942         u32 n_sq_elems,
2943         u32 n_rq_elems)
2944 {
2945         struct ecore_rdma_create_qp_out_params out_params;
2946         int             rc;
2947         qlnx_host_t     *ha;
2948
2949         ha = dev->ha;
2950
2951         QL_DPRINT12(ha, "enter\n");
2952
2953         rc = ecore_chain_alloc(
2954                 dev->cdev,
2955                 ECORE_CHAIN_USE_TO_PRODUCE,
2956                 ECORE_CHAIN_MODE_PBL,
2957                 ECORE_CHAIN_CNT_TYPE_U32,
2958                 n_sq_elems,
2959                 QLNXR_SQE_ELEMENT_SIZE,
2960                 &qp->sq.pbl,
2961                 NULL);
2962
2963         if (rc) {
2964                 QL_DPRINT11(ha, "ecore_chain_alloc qp->sq.pbl failed[%d]\n", rc);
2965                 return rc;
2966         }
2967
2968         in_params->sq_num_pages = ecore_chain_get_page_cnt(&qp->sq.pbl);
2969         in_params->sq_pbl_ptr = ecore_chain_get_pbl_phys(&qp->sq.pbl);
2970
2971         if (!qp->srq) {
2972
2973                 rc = ecore_chain_alloc(
2974                         dev->cdev,
2975                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2976                         ECORE_CHAIN_MODE_PBL,
2977                         ECORE_CHAIN_CNT_TYPE_U32,
2978                         n_rq_elems,
2979                         QLNXR_RQE_ELEMENT_SIZE,
2980                         &qp->rq.pbl,
2981                         NULL);
2982
2983                 if (rc) {
2984                         QL_DPRINT11(ha,
2985                                 "ecore_chain_alloc qp->rq.pbl failed[%d]\n", rc);
2986                         return rc;
2987                 }
2988
2989                 in_params->rq_num_pages = ecore_chain_get_page_cnt(&qp->rq.pbl);
2990                 in_params->rq_pbl_ptr = ecore_chain_get_pbl_phys(&qp->rq.pbl);
2991         }
2992
2993         qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, in_params, &out_params);
2994
2995         if (!qp->ecore_qp) {
2996                 QL_DPRINT11(ha, "qp->ecore_qp == NULL\n");
2997                 return -EINVAL;
2998         }
2999
3000         qp->qp_id = out_params.qp_id;
3001         qp->icid = out_params.icid;
3002
3003         qlnxr_set_roce_db_info(dev, qp);
3004
3005         QL_DPRINT12(ha, "exit\n");
3006         return 0;
3007 }
3008
3009 static int
3010 qlnxr_iwarp_create_kernel_qp(struct qlnxr_dev *dev,
3011         struct qlnxr_qp *qp,
3012         struct ecore_rdma_create_qp_in_params *in_params,
3013         u32 n_sq_elems,
3014         u32 n_rq_elems)
3015 {
3016         struct ecore_rdma_destroy_qp_out_params d_out_params;
3017         struct ecore_rdma_create_qp_out_params out_params;
3018         struct ecore_chain_ext_pbl ext_pbl;
3019         int rc;
3020         qlnx_host_t     *ha;
3021
3022         ha = dev->ha;
3023
3024         QL_DPRINT12(ha, "enter\n");
3025
3026         in_params->sq_num_pages = ECORE_CHAIN_PAGE_CNT(n_sq_elems,
3027                                                      QLNXR_SQE_ELEMENT_SIZE,
3028                                                      ECORE_CHAIN_MODE_PBL);
3029         in_params->rq_num_pages = ECORE_CHAIN_PAGE_CNT(n_rq_elems,
3030                                                      QLNXR_RQE_ELEMENT_SIZE,
3031                                                      ECORE_CHAIN_MODE_PBL);
3032
3033         QL_DPRINT12(ha, "n_sq_elems = 0x%x"
3034                 " n_rq_elems = 0x%x in_params\n"
3035                 "\t\t\tqp_handle_lo\t\t= 0x%08x\n"
3036                 "\t\t\tqp_handle_hi\t\t= 0x%08x\n"
3037                 "\t\t\tqp_handle_async_lo\t\t= 0x%08x\n"
3038                 "\t\t\tqp_handle_async_hi\t\t= 0x%08x\n"
3039                 "\t\t\tuse_srq\t\t\t= 0x%x\n"
3040                 "\t\t\tsignal_all\t\t= 0x%x\n"
3041                 "\t\t\tfmr_and_reserved_lkey\t= 0x%x\n"
3042                 "\t\t\tpd\t\t\t= 0x%x\n"
3043                 "\t\t\tdpi\t\t\t= 0x%x\n"
3044                 "\t\t\tsq_cq_id\t\t\t= 0x%x\n"
3045                 "\t\t\tsq_num_pages\t\t= 0x%x\n"
3046                 "\t\t\tsq_pbl_ptr\t\t= %p\n"
3047                 "\t\t\tmax_sq_sges\t\t= 0x%x\n"
3048                 "\t\t\trq_cq_id\t\t\t= 0x%x\n"
3049                 "\t\t\trq_num_pages\t\t= 0x%x\n"
3050                 "\t\t\trq_pbl_ptr\t\t= %p\n"
3051                 "\t\t\tsrq_id\t\t\t= 0x%x\n"
3052                 "\t\t\tstats_queue\t\t= 0x%x\n",
3053                 n_sq_elems, n_rq_elems,
3054                 in_params->qp_handle_lo,
3055                 in_params->qp_handle_hi,
3056                 in_params->qp_handle_async_lo,
3057                 in_params->qp_handle_async_hi,
3058                 in_params->use_srq,
3059                 in_params->signal_all,
3060                 in_params->fmr_and_reserved_lkey,
3061                 in_params->pd,
3062                 in_params->dpi,
3063                 in_params->sq_cq_id,
3064                 in_params->sq_num_pages,
3065                 (void *)in_params->sq_pbl_ptr,
3066                 in_params->max_sq_sges,
3067                 in_params->rq_cq_id,
3068                 in_params->rq_num_pages,
3069                 (void *)in_params->rq_pbl_ptr,
3070                 in_params->srq_id,
3071                 in_params->stats_queue );
3072
3073         memset(&out_params, 0, sizeof (struct ecore_rdma_create_qp_out_params));
3074         memset(&ext_pbl, 0, sizeof (struct ecore_chain_ext_pbl));
3075
3076         qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, in_params, &out_params);
3077
3078         if (!qp->ecore_qp) {
3079                 QL_DPRINT11(ha, "ecore_rdma_create_qp failed\n");
3080                 return -EINVAL;
3081         }
3082
3083         /* Now we allocate the chain */
3084         ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
3085         ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
3086
3087         QL_DPRINT12(ha, "ext_pbl.p_pbl_virt = %p "
3088                 "ext_pbl.p_pbl_phys = %p\n",
3089                 ext_pbl.p_pbl_virt, ext_pbl.p_pbl_phys);
3090                 
3091         rc = ecore_chain_alloc(
3092                 dev->cdev,
3093                 ECORE_CHAIN_USE_TO_PRODUCE,
3094                 ECORE_CHAIN_MODE_PBL,
3095                 ECORE_CHAIN_CNT_TYPE_U32,
3096                 n_sq_elems,
3097                 QLNXR_SQE_ELEMENT_SIZE,
3098                 &qp->sq.pbl,
3099                 &ext_pbl);
3100
3101         if (rc) {
3102                 QL_DPRINT11(ha,
3103                         "ecore_chain_alloc qp->sq.pbl failed rc = %d\n", rc);
3104                 goto err;
3105         }
3106
3107         ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
3108         ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
3109
3110         QL_DPRINT12(ha, "ext_pbl.p_pbl_virt = %p "
3111                 "ext_pbl.p_pbl_phys = %p\n",
3112                 ext_pbl.p_pbl_virt, ext_pbl.p_pbl_phys);
3113
3114         if (!qp->srq) {
3115
3116                 rc = ecore_chain_alloc(
3117                         dev->cdev,
3118                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
3119                         ECORE_CHAIN_MODE_PBL,
3120                         ECORE_CHAIN_CNT_TYPE_U32,
3121                         n_rq_elems,
3122                         QLNXR_RQE_ELEMENT_SIZE,
3123                         &qp->rq.pbl,
3124                         &ext_pbl);
3125
3126                 if (rc) {
3127                         QL_DPRINT11(ha, "ecore_chain_alloc qp->rq.pbl"
3128                                 " failed rc = %d\n", rc);
3129                         goto err;
3130                 }
3131         }
3132
3133         QL_DPRINT12(ha, "qp_id = 0x%x icid =0x%x\n",
3134                 out_params.qp_id, out_params.icid);
3135
3136         qp->qp_id = out_params.qp_id;
3137         qp->icid = out_params.icid;
3138
3139         qlnxr_set_iwarp_db_info(dev, qp);
3140
3141         QL_DPRINT12(ha, "exit\n");
3142         return 0;
3143
3144 err:
3145         ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp, &d_out_params);
3146
3147         QL_DPRINT12(ha, "exit rc = %d\n", rc);
3148         return rc;
3149 }
3150
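/*
 * Common kernel-QP creation: allocates the SQ/RQ shadow wr_id arrays, fills
 * the ecore create-QP parameters from the ib_qp_init_attr, and dispatches to
 * the RoCE- or iWARP-specific routine above depending on the device mode.
 */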
3151 static int
3152 qlnxr_create_kernel_qp(struct qlnxr_dev *dev,
3153         struct qlnxr_qp *qp,
3154         struct ib_pd *ibpd,
3155         struct ib_qp_init_attr *attrs)
3156 {
3157         struct ecore_rdma_create_qp_in_params in_params;
3158         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
3159         int rc = -EINVAL;
3160         u32 n_rq_elems;
3161         u32 n_sq_elems;
3162         u32 n_sq_entries;
3163         struct ecore_rdma_device *qattr = ecore_rdma_query_device(dev->rdma_ctx);
3164         qlnx_host_t     *ha;
3165
3166         ha = dev->ha;
3167
3168         QL_DPRINT12(ha, "enter\n");
3169
3170         memset(&in_params, 0, sizeof(in_params));
3171
3172         /* A single work request may take up to MAX_SQ_WQE_SIZE elements in
3173          * the ring. The ring should allow at least a single WR, even if the
3174          * user requested none, due to allocation issues.
3175          * We should add an extra WR since the prod and cons indices of
3176          * wqe_wr_id are managed in such a way that the WQ is considered full
3177          * when (prod+1)%max_wr==cons. We currently don't do that because we
3178          * double the number of entries due to an iSER issue that pushes far
3179          * more WRs than indicated. If we decline its ib_post_send() then we
3180          * get error prints in dmesg that we'd like to avoid.
3181          */
3182         qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
3183                               qattr->max_wqe);
3184
3185         qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
3186                         GFP_KERNEL);
3187         if (!qp->wqe_wr_id) {
3188                 QL_DPRINT11(ha, "failed SQ shadow memory allocation\n");
3189                 return -ENOMEM;
3190         }
3191
3192         /* QP handle to be written in CQE */
3193         in_params.qp_handle_lo = lower_32_bits((uintptr_t)qp);
3194         in_params.qp_handle_hi = upper_32_bits((uintptr_t)qp);
3195
3196         /* A single work request may take up to MAX_RQ_WQE_SIZE elements in
3197          * the ring. The ring should allow at least a single WR, even if the
3198          * user requested none, due to allocation issues.
3199          */
3200         qp->rq.max_wr = (u16)max_t(u32, attrs->cap.max_recv_wr, 1);
3201
3202         /* Allocate driver internal RQ array */
3203         if (!qp->srq) {
3204                 qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
3205                                         GFP_KERNEL);
3206                 if (!qp->rqe_wr_id) {
3207                         QL_DPRINT11(ha, "failed RQ shadow memory allocation\n");
3208                         kfree(qp->wqe_wr_id);
3209                         return -ENOMEM;
3210                 }
3211         }
3212
3213         //qlnxr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
3214
3215         in_params.qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
3216         in_params.qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
3217
3218         in_params.signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
3219         in_params.fmr_and_reserved_lkey = true;
3220         in_params.pd = pd->pd_id;
3221         in_params.dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
3222         in_params.sq_cq_id = get_qlnxr_cq(attrs->send_cq)->icid;
3223         in_params.stats_queue = 0;
3224
3225         in_params.rq_cq_id = get_qlnxr_cq(attrs->recv_cq)->icid;
3226
3227         if (qp->srq) {
3228                 /* QP is associated with SRQ instead of RQ */
3229                 in_params.srq_id = qp->srq->srq_id;
3230                 in_params.use_srq = true;
3231                 QL_DPRINT11(ha, "srq_id = 0x%x use_srq = 0x%x\n",
3232                         in_params.srq_id, in_params.use_srq);
3233         } else {
3234                 in_params.srq_id = 0;
3235                 in_params.use_srq = false;
3236         }
3237
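        /*
         * The chains are sized in elements, not WRs: each WR may consume up
         * to QLNXR_MAX_SQE_ELEMENTS_PER_SQE elements on the SQ and
         * QLNXR_MAX_RQE_ELEMENTS_PER_RQE elements on the RQ.
         */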
3238         n_sq_entries = attrs->cap.max_send_wr;
3239         n_sq_entries = min_t(u32, n_sq_entries, qattr->max_wqe);
3240         n_sq_entries = max_t(u32, n_sq_entries, 1);
3241         n_sq_elems = n_sq_entries * QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
3242
3243         n_rq_elems = qp->rq.max_wr * QLNXR_MAX_RQE_ELEMENTS_PER_RQE;
3244
3245         if (QLNX_IS_ROCE(dev)) {
3246                 rc = qlnxr_roce_create_kernel_qp(dev, qp, &in_params,
3247                                                 n_sq_elems, n_rq_elems);
3248         } else {
3249                 rc = qlnxr_iwarp_create_kernel_qp(dev, qp, &in_params,
3250                                                  n_sq_elems, n_rq_elems);
3251         }
3252
3253         if (rc)
3254                 qlnxr_cleanup_kernel(dev, qp);
3255
3256         QL_DPRINT12(ha, "exit [%d]\n", rc);
3257         return rc;
3258 }
3259
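/*
 * Verbs entry point for QP creation. GSI QPs take a dedicated path; all other
 * QPs are created either from user-space mappings (udata) or as kernel QPs,
 * and are then registered in the driver's QP idr under their qp_id.
 */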
3260 struct ib_qp *
3261 qlnxr_create_qp(struct ib_pd *ibpd,
3262                 struct ib_qp_init_attr *attrs,
3263                 struct ib_udata *udata)
3264 {
3265         struct qlnxr_dev *dev = get_qlnxr_dev(ibpd->device);
3266         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
3267         struct qlnxr_qp *qp;
3268         int rc = 0;
3269         qlnx_host_t     *ha;
3270
3271         ha = dev->ha;
3272
3273         QL_DPRINT12(ha, "enter\n");
3274
3275         rc = qlnxr_check_qp_attrs(ibpd, dev, attrs, udata);
3276         if (rc) {
3277                 QL_DPRINT11(ha, "qlnxr_check_qp_attrs failed [%d]\n", rc);
3278                 return ERR_PTR(rc);
3279         }
3280
3281         QL_DPRINT12(ha, "called from %s, event_handle=%p,"
3282                 " eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
3283                 (udata ? "user library" : "kernel"),
3284                 attrs->event_handler, pd,
3285                 get_qlnxr_cq(attrs->send_cq),
3286                 get_qlnxr_cq(attrs->send_cq)->icid,
3287                 get_qlnxr_cq(attrs->recv_cq),
3288                 get_qlnxr_cq(attrs->recv_cq)->icid);
3289
3290         qp = qlnx_zalloc(sizeof(struct qlnxr_qp));
3291
3292         if (!qp) {
3293                 QL_DPRINT11(ha, "qlnx_zalloc(qp) failed\n");
3294                 return ERR_PTR(-ENOMEM);
3295         }
3296
3297         qlnxr_set_common_qp_params(dev, qp, pd, attrs);
3298
3299         if (attrs->qp_type == IB_QPT_GSI) {
3300                 QL_DPRINT11(ha, "calling qlnxr_create_gsi_qp\n");
3301                 return qlnxr_create_gsi_qp(dev, attrs, qp);
3302         }
3303
3304         if (udata) {
3305                 rc = qlnxr_create_user_qp(dev, qp, ibpd, udata, attrs);
3306
3307                 if (rc) {
3308                         QL_DPRINT11(ha, "qlnxr_create_user_qp failed\n");
3309                         goto err;
3310                 }
3311         } else {
3312                 rc = qlnxr_create_kernel_qp(dev, qp, ibpd, attrs);
3313
3314                 if (rc) {
3315                         QL_DPRINT11(ha, "qlnxr_create_kernel_qp failed\n");
3316                         goto err;
3317                 }
3318         }
3319
3320         qp->ibqp.qp_num = qp->qp_id;
3321
3322         rc = qlnxr_idr_add(dev, qp, qp->qp_id);
3323
3324         if (rc) {
3325                 QL_DPRINT11(ha, "qlnxr_idr_add failed\n");
3326                 goto err;
3327         }
3328
3329         QL_DPRINT12(ha, "exit [%p]\n", &qp->ibqp);
3330
3331         return &qp->ibqp;
3332 err:
3333         kfree(qp);
3334
3335         QL_DPRINT12(ha, "failed exit\n");
3336         return ERR_PTR(-EFAULT);
3337 }
3338
3339
3340 static enum ib_qp_state
3341 qlnxr_get_ibqp_state(enum ecore_roce_qp_state qp_state)
3342 {
3343         enum ib_qp_state state = IB_QPS_ERR;
3344
3345         switch (qp_state) {
3346         case ECORE_ROCE_QP_STATE_RESET:
3347                 state = IB_QPS_RESET;
3348                 break;
3349
3350         case ECORE_ROCE_QP_STATE_INIT:
3351                 state = IB_QPS_INIT;
3352                 break;
3353
3354         case ECORE_ROCE_QP_STATE_RTR:
3355                 state = IB_QPS_RTR;
3356                 break;
3357
3358         case ECORE_ROCE_QP_STATE_RTS:
3359                 state = IB_QPS_RTS;
3360                 break;
3361
3362         case ECORE_ROCE_QP_STATE_SQD:
3363                 state = IB_QPS_SQD;
3364                 break;
3365
3366         case ECORE_ROCE_QP_STATE_ERR:
3367                 state = IB_QPS_ERR;
3368                 break;
3369
3370         case ECORE_ROCE_QP_STATE_SQE:
3371                 state = IB_QPS_SQE;
3372                 break;
3373         }
3374         return state;
3375 }
3376
3377 static enum ecore_roce_qp_state
3378 qlnxr_get_state_from_ibqp(enum ib_qp_state qp_state)
3379 {
3380         enum ecore_roce_qp_state ecore_qp_state;
3381
3382         ecore_qp_state = ECORE_ROCE_QP_STATE_ERR;
3383
3384         switch (qp_state) {
3385         case IB_QPS_RESET:
3386                 ecore_qp_state =  ECORE_ROCE_QP_STATE_RESET;
3387                 break;
3388
3389         case IB_QPS_INIT:
3390                 ecore_qp_state =  ECORE_ROCE_QP_STATE_INIT;
3391                 break;
3392
3393         case IB_QPS_RTR:
3394                 ecore_qp_state =  ECORE_ROCE_QP_STATE_RTR;
3395                 break;
3396
3397         case IB_QPS_RTS:
3398                 ecore_qp_state =  ECORE_ROCE_QP_STATE_RTS;
3399                 break;
3400
3401         case IB_QPS_SQD:
3402                 ecore_qp_state =  ECORE_ROCE_QP_STATE_SQD;
3403                 break;
3404
3405         case IB_QPS_ERR:
3406                 ecore_qp_state =  ECORE_ROCE_QP_STATE_ERR;
3407                 break;
3408
3409         default:
3410                 ecore_qp_state =  ECORE_ROCE_QP_STATE_ERR;
3411                 break;
3412         }
3413
3414         return (ecore_qp_state);
3415 }
3416
3417 static void
3418 qlnxr_reset_qp_hwq_info(struct qlnxr_qp_hwq_info *qph)
3419 {
3420         ecore_chain_reset(&qph->pbl);
3421         qph->prod = qph->cons = 0;
3422         qph->wqe_cons = 0;
3423         qph->db_data.data.value = cpu_to_le16(0);
3424
3425         return;
3426 }
3427
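/*
 * Software QP state machine: validates the requested transition and applies
 * its side effects. The only transition with work attached here is INIT->RTR,
 * where the RQ doorbell (and the second iWARP doorbell) is rung in case
 * post_recv was issued before the move to RTR.
 */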
3428 static int
3429 qlnxr_update_qp_state(struct qlnxr_dev *dev,
3430         struct qlnxr_qp *qp,
3431         enum ecore_roce_qp_state new_state)
3432 {
3433         int             status = 0;
3434         uint32_t        reg_addr;
3435         struct ecore_dev *cdev;
3436         qlnx_host_t     *ha;
3437
3438         ha = dev->ha;
3439         cdev = &ha->cdev;
3440
3441         QL_DPRINT12(ha, "enter qp = %p new_state = 0x%x qp->state = 0x%x\n",
3442                 qp, new_state, qp->state);
3443
3444         if (new_state == qp->state) {
3445                 return 0;
3446         }
3447
3448         switch (qp->state) {
3449         case ECORE_ROCE_QP_STATE_RESET:
3450                 switch (new_state) {
3451                 case ECORE_ROCE_QP_STATE_INIT:
3452                         qp->prev_wqe_size = 0;
3453                         qlnxr_reset_qp_hwq_info(&qp->sq);
3454                         if (!(qp->srq))
3455                                 qlnxr_reset_qp_hwq_info(&qp->rq);
3456                         break;
3457                 default:
3458                         status = -EINVAL;
3459                         break;
3460                 };
3461                 break;
3462         case ECORE_ROCE_QP_STATE_INIT:
3463                 /* INIT->XXX */
3464                 switch (new_state) {
3465                 case ECORE_ROCE_QP_STATE_RTR:
3466                 /* Update doorbell (in case post_recv was done before move to RTR) */
3467                         if (qp->srq)
3468                                 break;
3469                         wmb();
3470                         //writel(qp->rq.db_data.raw, qp->rq.db);
3471                         //if (QLNX_IS_IWARP(dev))
3472                         //      writel(qp->rq.iwarp_db2_data.raw,
3473                         //             qp->rq.iwarp_db2);
3474
3475                         reg_addr = (uint32_t)((uint8_t *)qp->rq.db -
3476                                         (uint8_t *)cdev->doorbells);
3477
3478                         bus_write_4(ha->pci_dbells, reg_addr, qp->rq.db_data.raw);
3479                         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
3480
3481                         if (QLNX_IS_IWARP(dev)) {
3482                                 reg_addr = (uint32_t)((uint8_t *)qp->rq.iwarp_db2 -
3483                                         (uint8_t *)cdev->doorbells);
3484                                 bus_write_4(ha->pci_dbells, reg_addr,
3485                                         qp->rq.iwarp_db2_data.raw);
3486                                 bus_barrier(ha->pci_dbells, 0, 0,
3487                                         BUS_SPACE_BARRIER_READ);
3488                         }
3489
3490                         
3491                         mmiowb();
3492                         break;
3493                 case ECORE_ROCE_QP_STATE_ERR:
3494                         /* TBD:flush qps... */
3495                         break;
3496                 default:
3497                         /* invalid state change. */
3498                         status = -EINVAL;
3499                         break;
3500                 };
3501                 break;
3502         case ECORE_ROCE_QP_STATE_RTR:
3503                 /* RTR->XXX */
3504                 switch (new_state) {
3505                 case ECORE_ROCE_QP_STATE_RTS:
3506                         break;
3507                 case ECORE_ROCE_QP_STATE_ERR:
3508                         break;
3509                 default:
3510                         /* invalid state change. */
3511                         status = -EINVAL;
3512                         break;
3513                 };
3514                 break;
3515         case ECORE_ROCE_QP_STATE_RTS:
3516                 /* RTS->XXX */
3517                 switch (new_state) {
3518                 case ECORE_ROCE_QP_STATE_SQD:
3519                         break;
3520                 case ECORE_ROCE_QP_STATE_ERR:
3521                         break;
3522                 default:
3523                         /* invalid state change. */
3524                         status = -EINVAL;
3525                         break;
3526                 };
3527                 break;
3528         case ECORE_ROCE_QP_STATE_SQD:
3529                 /* SQD->XXX */
3530                 switch (new_state) {
3531                 case ECORE_ROCE_QP_STATE_RTS:
3532                 case ECORE_ROCE_QP_STATE_ERR:
3533                         break;
3534                 default:
3535                         /* invalid state change. */
3536                         status = -EINVAL;
3537                         break;
3538                 };
3539                 break;
3540         case ECORE_ROCE_QP_STATE_ERR:
3541                 /* ERR->XXX */
3542                 switch (new_state) {
3543                 case ECORE_ROCE_QP_STATE_RESET:
3544                         if ((qp->rq.prod != qp->rq.cons) ||
3545                             (qp->sq.prod != qp->sq.cons)) {
3546                                 QL_DPRINT11(ha,
3547                                         "Error->Reset with rq/sq "
3548                                         "not empty rq.prod=0x%x rq.cons=0x%x"
3549                                         " sq.prod=0x%x sq.cons=0x%x\n",
3550                                         qp->rq.prod, qp->rq.cons,
3551                                         qp->sq.prod, qp->sq.cons);
3552                                 status = -EINVAL;
3553                         }
3554                         break;
3555                 default:
3556                         status = -EINVAL;
3557                         break;
3558                 };
3559                 break;
3560         default:
3561                 status = -EINVAL;
3562                 break;
3563         };
3564
3565         QL_DPRINT12(ha, "exit\n");
3566         return status;
3567 }
3568
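/*
 * Modify-QP: validates the state transition (RoCE only), translates the
 * ib_qp_attr mask into ecore modify-QP flags, issues the change through
 * ecore_rdma_modify_qp() and finally updates the software QP state.
 */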
3569 int
3570 qlnxr_modify_qp(struct ib_qp    *ibqp,
3571         struct ib_qp_attr       *attr,
3572         int                     attr_mask,
3573         struct ib_udata         *udata)
3574 {
3575         int rc = 0;
3576         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3577         struct qlnxr_dev *dev = get_qlnxr_dev(&qp->dev->ibdev);
3578         struct ecore_rdma_modify_qp_in_params qp_params = { 0 };
3579         enum ib_qp_state old_qp_state, new_qp_state;
3580         struct ecore_rdma_device *qattr = ecore_rdma_query_device(dev->rdma_ctx);
3581         qlnx_host_t     *ha;
3582
3583         ha = dev->ha;
3584
3585         QL_DPRINT12(ha,
3586                 "enter qp = %p attr_mask = 0x%x, state = %d udata = %p\n",
3587                 qp, attr_mask, attr->qp_state, udata);
3588
3589         old_qp_state = qlnxr_get_ibqp_state(qp->state);
3590         if (attr_mask & IB_QP_STATE)
3591                 new_qp_state = attr->qp_state;
3592         else
3593                 new_qp_state = old_qp_state;
3594
3595         if (QLNX_IS_ROCE(dev)) {
3596 #if __FreeBSD_version >= 1100000
3597                 if (!ib_modify_qp_is_ok(old_qp_state,
3598                                         new_qp_state,
3599                                         ibqp->qp_type,
3600                                         attr_mask,
3601                                         IB_LINK_LAYER_ETHERNET)) {
3602                         QL_DPRINT12(ha,
3603                                 "invalid attribute mask=0x%x"
3604                                 " specified for qpn=0x%x of type=0x%x \n"
3605                                 " old_qp_state=0x%x, new_qp_state=0x%x\n",
3606                                 attr_mask, qp->qp_id, ibqp->qp_type,
3607                                 old_qp_state, new_qp_state);
3608                         rc = -EINVAL;
3609                         goto err;
3610                 }
3611 #else
3612                 if (!ib_modify_qp_is_ok(old_qp_state,
3613                                         new_qp_state,
3614                                         ibqp->qp_type,
3615                                         attr_mask )) {
3616                         QL_DPRINT12(ha,
3617                                 "invalid attribute mask=0x%x"
3618                                 " specified for qpn=0x%x of type=0x%x \n"
3619                                 " old_qp_state=0x%x, new_qp_state=0x%x\n",
3620                                 attr_mask, qp->qp_id, ibqp->qp_type,
3621                                 old_qp_state, new_qp_state);
3622                         rc = -EINVAL;
3623                         goto err;
3624                 }
3625
3626 #endif /* #if __FreeBSD_version >= 1100000 */
3627         }
3628         /* translate the masks... */
3629         if (attr_mask & IB_QP_STATE) {
3630                 SET_FIELD(qp_params.modify_flags,
3631                           ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
3632                 qp_params.new_state = qlnxr_get_state_from_ibqp(attr->qp_state);
3633         }
3634
3635         // TBD consider changing ecore to be a flag as well...
3636         if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
3637                 qp_params.sqd_async = true;
3638
3639         if (attr_mask & IB_QP_PKEY_INDEX) {
3640                 SET_FIELD(qp_params.modify_flags,
3641                           ECORE_ROCE_MODIFY_QP_VALID_PKEY,
3642                           1);
3643                 if (attr->pkey_index >= QLNXR_ROCE_PKEY_TABLE_LEN) {
3644                         rc = -EINVAL;
3645                         goto err;
3646                 }
3647
3648                 qp_params.pkey = QLNXR_ROCE_PKEY_DEFAULT;
3649         }
3650
3651         if (attr_mask & IB_QP_QKEY) {
3652                 qp->qkey = attr->qkey;
3653         }
3654
3655         /* tbd consider splitting in ecore.. */
3656         if (attr_mask & IB_QP_ACCESS_FLAGS) {
3657                 SET_FIELD(qp_params.modify_flags,
3658                           ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
3659                 qp_params.incoming_rdma_read_en =
3660                         attr->qp_access_flags & IB_ACCESS_REMOTE_READ;
3661                 qp_params.incoming_rdma_write_en =
3662                         attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE;
3663                 qp_params.incoming_atomic_en =
3664                         attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC;
3665         }
3666
3667         if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
3668                 if (attr_mask & IB_QP_PATH_MTU) {
3669                         if (attr->path_mtu < IB_MTU_256 ||
3670                             attr->path_mtu > IB_MTU_4096) {
3671
3672                                 QL_DPRINT12(ha,
3673                                         "Only MTU sizes of 256, 512, 1024,"
3674                                         " 2048 and 4096 are supported "
3675                                         " attr->path_mtu = [%d]\n",
3676                                         attr->path_mtu);
3677
3678                                 rc = -EINVAL;
3679                                 goto err;
3680                         }
3681                         qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
3682                                       ib_mtu_enum_to_int(
3683                                                 iboe_get_mtu(dev->ha->ifp->if_mtu)));
3684                 }
3685
3686                 if (qp->mtu == 0) {
3687                         qp->mtu = ib_mtu_enum_to_int(
3688                                         iboe_get_mtu(dev->ha->ifp->if_mtu));
3689                         QL_DPRINT12(ha, "fixing zeroed MTU to qp->mtu = %d\n",
3690                                 qp->mtu);
3691                 }
3692
3693                 SET_FIELD(qp_params.modify_flags,
3694                           ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR,
3695                           1);
3696
3697                 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
3698                 qp_params.flow_label = attr->ah_attr.grh.flow_label;
3699                 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
3700
3701                 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
3702
3703                 get_gid_info(ibqp, attr, attr_mask, dev, qp, &qp_params);
3704
3705                 rc = qlnxr_get_dmac(dev, &attr->ah_attr, qp_params.remote_mac_addr);
3706                 if (rc)
3707                         return rc;
3708
3709                 qp_params.use_local_mac = true;
3710                 memcpy(qp_params.local_mac_addr, dev->ha->primary_mac, ETH_ALEN);
3711
3712                 QL_DPRINT12(ha, "dgid=0x%x:0x%x:0x%x:0x%x\n",
3713                        qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
3714                        qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
3715                 QL_DPRINT12(ha, "sgid=0x%x:0x%x:0x%x:0x%x\n",
3716                        qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
3717                        qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
3718                 QL_DPRINT12(ha,
3719                         "remote_mac=[0x%x:0x%x:0x%x:0x%x:0x%x:0x%x]\n",
3720                         qp_params.remote_mac_addr[0],
3721                         qp_params.remote_mac_addr[1],
3722                         qp_params.remote_mac_addr[2],
3723                         qp_params.remote_mac_addr[3],
3724                         qp_params.remote_mac_addr[4],
3725                         qp_params.remote_mac_addr[5]);
3726
3727                 qp_params.mtu = qp->mtu;
3728         }
3729
3730         if (qp_params.mtu == 0) {
3731                 /* stay with current MTU */
3732                 if (qp->mtu) {
3733                         qp_params.mtu = qp->mtu;
3734                 } else {
3735                         qp_params.mtu = ib_mtu_enum_to_int(
3736                                                 iboe_get_mtu(dev->ha->ifp->if_mtu));
3737                 }
3738         }
3739
3740         if (attr_mask & IB_QP_TIMEOUT) {
3741                 SET_FIELD(qp_params.modify_flags, \
3742                         ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
3743
3744                 qp_params.ack_timeout = attr->timeout;
3745                 if (attr->timeout) {
3746                         u32 temp;
3747
3748                         /* 12.7.34 LOCAL ACK TIMEOUT
3749                          * Value representing the transport (ACK) timeout for
3750                          * use by the remote, expressed as
3751                          * 4.096 usec * 2^(Local ACK Timeout).
3752                          */
3753                         /* Use 1UL since the intermediate value may overflow
3754                          * 32 bits.
3755                          */
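                        /*
                         * Example: attr->timeout == 14 gives
                         * 4096ns * 2^14 = 67,108,864ns, i.e. temp = 67 msec.
                         */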
3756                         temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
3757                         qp_params.ack_timeout = temp; /* FW requires [msec] */
3758                 }
3759                 else
3760                         qp_params.ack_timeout = 0; /* infinite */
3761         }
3762         if (attr_mask & IB_QP_RETRY_CNT) {
3763                 SET_FIELD(qp_params.modify_flags,\
3764                          ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
3765                 qp_params.retry_cnt = attr->retry_cnt;
3766         }
3767
3768         if (attr_mask & IB_QP_RNR_RETRY) {
3769                 SET_FIELD(qp_params.modify_flags,
3770                           ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT,
3771                           1);
3772                 qp_params.rnr_retry_cnt = attr->rnr_retry;
3773         }
3774
3775         if (attr_mask & IB_QP_RQ_PSN) {
3776                 SET_FIELD(qp_params.modify_flags,
3777                           ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN,
3778                           1);
3779                 qp_params.rq_psn = attr->rq_psn;
3780                 qp->rq_psn = attr->rq_psn;
3781         }
3782
3783         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
3784                 if (attr->max_rd_atomic > qattr->max_qp_req_rd_atomic_resc) {
3785                         rc = -EINVAL;
3786                         QL_DPRINT12(ha,
3787                                 "unsupported  max_rd_atomic=%d, supported=%d\n",
3788                                 attr->max_rd_atomic,
3789                                 qattr->max_qp_req_rd_atomic_resc);
3790                         goto err;
3791                 }
3792
3793                 SET_FIELD(qp_params.modify_flags,
3794                           ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ,
3795                           1);
3796                 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
3797         }
3798
3799         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
3800                 SET_FIELD(qp_params.modify_flags,
3801                           ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER,
3802                           1);
3803                 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
3804         }
3805
3806         if (attr_mask & IB_QP_SQ_PSN) {
3807                 SET_FIELD(qp_params.modify_flags,
3808                           ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN,
3809                           1);
3810                 qp_params.sq_psn = attr->sq_psn;
3811                 qp->sq_psn = attr->sq_psn;
3812         }
3813
3814         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
3815                 if (attr->max_dest_rd_atomic >
3816                     qattr->max_qp_resp_rd_atomic_resc) {
3817                         QL_DPRINT12(ha,
3818                                 "unsupported max_dest_rd_atomic=%d, "
3819                                 "supported=%d\n",
3820                                 attr->max_dest_rd_atomic,
3821                                 qattr->max_qp_resp_rd_atomic_resc);
3822
3823                         rc = -EINVAL;
3824                         goto err;
3825                 }
3826
3827                 SET_FIELD(qp_params.modify_flags,
3828                           ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP,
3829                           1);
3830                 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
3831         }
3832
3833         if (attr_mask & IB_QP_DEST_QPN) {
3834                 SET_FIELD(qp_params.modify_flags,
3835                           ECORE_ROCE_MODIFY_QP_VALID_DEST_QP,
3836                           1);
3837
3838                 qp_params.dest_qp = attr->dest_qp_num;
3839                 qp->dest_qp_num = attr->dest_qp_num;
3840         }
3841
3842         /*
3843          * Update the QP state before the actual ramrod to prevent a race with
3844          * fast path. Modifying the QP state to error will cause the device to
3845          * flush the CQEs, and the flushed CQEs would be treated as a potential
3846          * issue when polled if the QP isn't already in the error state.
3847          */
3848         if ((attr_mask & IB_QP_STATE) && (qp->qp_type != IB_QPT_GSI) &&
3849                 (!udata) && (qp_params.new_state == ECORE_ROCE_QP_STATE_ERR))
3850                 qp->state = ECORE_ROCE_QP_STATE_ERR;
3851
3852         if (qp->qp_type != IB_QPT_GSI)
3853                 rc = ecore_rdma_modify_qp(dev->rdma_ctx, qp->ecore_qp, &qp_params);
3854
3855         if (attr_mask & IB_QP_STATE) {
3856                 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
3857                         rc = qlnxr_update_qp_state(dev, qp, qp_params.new_state);
3858                 qp->state = qp_params.new_state;
3859         }
3860
3861 err:
3862         QL_DPRINT12(ha, "exit\n");
3863         return rc;
3864 }
3865
3866 static int
3867 qlnxr_to_ib_qp_acc_flags(struct ecore_rdma_query_qp_out_params *params)
3868 {
3869         int ib_qp_acc_flags = 0;
3870
3871         if (params->incoming_rdma_write_en)
3872                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
3873         if (params->incoming_rdma_read_en)
3874                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
3875         if (params->incoming_atomic_en)
3876                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
3877         if (true) /* FIXME -> local write ?? */
3878                 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
3879
3880         return ib_qp_acc_flags;
3881 }
3882
3883 static enum ib_mtu
3884 qlnxr_mtu_int_to_enum(u16 mtu)
3885 {
3886         enum ib_mtu ib_mtu_size;
3887
3888         switch (mtu) {
3889         case 256:
3890                 ib_mtu_size = IB_MTU_256;
3891                 break;
3892
3893         case 512:
3894                 ib_mtu_size = IB_MTU_512;
3895                 break;
3896
3897         case 1024:
3898                 ib_mtu_size = IB_MTU_1024;
3899                 break;
3900
3901         case 2048:
3902                 ib_mtu_size = IB_MTU_2048;
3903                 break;
3904
3905         case 4096:
3906                 ib_mtu_size = IB_MTU_4096;
3907                 break;
3908
3909         default:
3910                 ib_mtu_size = IB_MTU_1024;
3911                 break;
3912         }
3913         return (ib_mtu_size);
3914 }
3915
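/*
 * Query-QP: fetches the QP attributes from ecore and translates them into
 * ib_qp_attr / ib_qp_init_attr. For iWARP, a state-only query (attr_mask ==
 * IB_QP_STATE) returns early with just the translated state.
 */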
3916 int
3917 qlnxr_query_qp(struct ib_qp *ibqp,
3918         struct ib_qp_attr *qp_attr,
3919         int attr_mask,
3920         struct ib_qp_init_attr *qp_init_attr)
3921 {
3922         int rc = 0;
3923         struct ecore_rdma_query_qp_out_params params;
3924         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3925         struct qlnxr_dev *dev = qp->dev;
3926         qlnx_host_t     *ha;
3927
3928         ha = dev->ha;
3929
3930         QL_DPRINT12(ha, "enter\n");
3931
3932         memset(&params, 0, sizeof(params));
3933
3934         rc = ecore_rdma_query_qp(dev->rdma_ctx, qp->ecore_qp, &params);
3935         if (rc)
3936                 goto err;
3937
3938         memset(qp_attr, 0, sizeof(*qp_attr));
3939         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3940
3941         qp_attr->qp_state = qlnxr_get_ibqp_state(params.state);
3942         qp_attr->cur_qp_state = qlnxr_get_ibqp_state(params.state);
3943
3944         /* In some cases in iWARP qelr will ask for the state only */
3945         if (QLNX_IS_IWARP(dev) && (attr_mask == IB_QP_STATE)) {
3946                 QL_DPRINT11(ha, "only state requested\n");
3947                 return 0;
3948         }
3949
3950         qp_attr->path_mtu = qlnxr_mtu_int_to_enum(params.mtu);
3951         qp_attr->path_mig_state = IB_MIG_MIGRATED;
3952         qp_attr->rq_psn = params.rq_psn;
3953         qp_attr->sq_psn = params.sq_psn;
3954         qp_attr->dest_qp_num = params.dest_qp;
3955
3956         qp_attr->qp_access_flags = qlnxr_to_ib_qp_acc_flags(&params);
3957
3958         QL_DPRINT12(ha, "qp_state = 0x%x cur_qp_state = 0x%x "
3959                 "path_mtu = %d qp_access_flags = 0x%x\n",
3960                 qp_attr->qp_state, qp_attr->cur_qp_state, qp_attr->path_mtu,
3961                 qp_attr->qp_access_flags);
3962
3963         qp_attr->cap.max_send_wr = qp->sq.max_wr;
3964         qp_attr->cap.max_recv_wr = qp->rq.max_wr;
3965         qp_attr->cap.max_send_sge = qp->sq.max_sges;
3966         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
3967         qp_attr->cap.max_inline_data = qp->max_inline_data;
3968         qp_init_attr->cap = qp_attr->cap;
3969
3970         memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
3971                sizeof(qp_attr->ah_attr.grh.dgid.raw));
3972
3973         qp_attr->ah_attr.grh.flow_label = params.flow_label;
3974         qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
3975         qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
3976         qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
3977
3978         qp_attr->ah_attr.ah_flags = IB_AH_GRH;
3979         qp_attr->ah_attr.port_num = 1; /* FIXME -> check this */
3980         qp_attr->ah_attr.sl = 0;/* FIXME -> check this */
3981         qp_attr->timeout = params.timeout;
3982         qp_attr->rnr_retry = params.rnr_retry;
3983         qp_attr->retry_cnt = params.retry_cnt;
3984         qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
3985         qp_attr->pkey_index = params.pkey_index;
3986         qp_attr->port_num = 1; /* FIXME -> check this */
3987         qp_attr->ah_attr.src_path_bits = 0;
3988         qp_attr->ah_attr.static_rate = 0;
3989         qp_attr->alt_pkey_index = 0;
3990         qp_attr->alt_port_num = 0;
3991         qp_attr->alt_timeout = 0;
3992         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
3993
3994         qp_attr->sq_draining = (params.state == ECORE_ROCE_QP_STATE_SQD) ? 1 : 0;
3995         qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
3996         qp_attr->max_rd_atomic = params.max_rd_atomic;
3997         qp_attr->en_sqd_async_notify = (params.sqd_async)? 1 : 0;
3998
3999         QL_DPRINT12(ha, "max_inline_data=%d\n",
4000                 qp_attr->cap.max_inline_data);
4001
4002 err:
4003         QL_DPRINT12(ha, "exit\n");
4004         return rc;
4005 }
4006
4007
4008 static void
4009 qlnxr_cleanup_user(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
4010 {
4011         qlnx_host_t     *ha;
4012
4013         ha = dev->ha;
4014
4015         QL_DPRINT12(ha, "enter\n");
4016  
4017         if (qp->usq.umem)
4018                 ib_umem_release(qp->usq.umem);
4019
4020         qp->usq.umem = NULL;
4021
4022         if (qp->urq.umem)
4023                 ib_umem_release(qp->urq.umem);
4024
4025         qp->urq.umem = NULL;
4026
4027         QL_DPRINT12(ha, "exit\n");
4028         return;
4029 }
4030
4031 static void
4032 qlnxr_cleanup_kernel(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
4033 {
4034         qlnx_host_t     *ha;
4035
4036         ha = dev->ha;
4037
4038         QL_DPRINT12(ha, "enter\n");
4039  
4040         if (qlnxr_qp_has_sq(qp)) {
4041                 QL_DPRINT12(ha, "freeing SQ\n");
4042                 ha->qlnxr_debug = 1;
4043 //              ecore_chain_free(dev->cdev, &qp->sq.pbl);
4044                 ha->qlnxr_debug = 0;
4045                 kfree(qp->wqe_wr_id);
4046         }
4047
4048         if (qlnxr_qp_has_rq(qp)) {
4049                 QL_DPRINT12(ha, "freeing RQ\n");
4050                 ha->qlnxr_debug = 1;
4051         //      ecore_chain_free(dev->cdev, &qp->rq.pbl);
4052                 ha->qlnxr_debug = 0;
4053                 kfree(qp->rqe_wr_id);
4054         }
4055
4056         QL_DPRINT12(ha, "exit\n");
4057         return;
4058 }
4059
4060 int
4061 qlnxr_free_qp_resources(struct qlnxr_dev *dev,
4062         struct qlnxr_qp *qp)
4063 {
4064         int             rc = 0;
4065         qlnx_host_t     *ha;
4066         struct ecore_rdma_destroy_qp_out_params d_out_params;
4067
4068         ha = dev->ha;
4069
4070         QL_DPRINT12(ha, "enter\n");
4071  
4072 #if 0
4073         if (qp->qp_type != IB_QPT_GSI) {
4074                 rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp,
4075                                 &d_out_params);
4076                 if (rc)
4077                         return rc;
4078         }
4079
4080         if (qp->ibqp.uobject && qp->ibqp.uobject->context)
4081                 qlnxr_cleanup_user(dev, qp);
4082         else
4083                 qlnxr_cleanup_kernel(dev, qp);
4084 #endif
4085
4086         if (qp->ibqp.uobject && qp->ibqp.uobject->context)
4087                 qlnxr_cleanup_user(dev, qp);
4088         else
4089                 qlnxr_cleanup_kernel(dev, qp);
4090
4091         if (qp->qp_type != IB_QPT_GSI) {
4092                 rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp,
4093                                 &d_out_params);
4094                 if (rc)
4095                         return rc;
4096         }
4097
4098         QL_DPRINT12(ha, "exit\n");
4099         return 0;
4100 }
4101
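/*
 * Destroy-QP: RoCE QPs not already in RESET, ERR or INIT are first moved to
 * the error state, GSI QPs tear down their dedicated resources, and the QP
 * memory is freed once the last reference is dropped.
 */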
4102 int
4103 qlnxr_destroy_qp(struct ib_qp *ibqp)
4104 {
4105         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
4106         struct qlnxr_dev *dev = qp->dev;
4107         int rc = 0;
4108         struct ib_qp_attr attr;
4109         int attr_mask = 0;
4110         qlnx_host_t     *ha;
4111
4112         ha = dev->ha;
4113
4114         QL_DPRINT12(ha, "enter qp = %p, qp_type=%d\n", qp, qp->qp_type);
4115
4116         qp->destroyed = 1;
4117
4118         if (QLNX_IS_ROCE(dev) && (qp->state != ECORE_ROCE_QP_STATE_RESET) &&
4119             (qp->state != ECORE_ROCE_QP_STATE_ERR) &&
4120             (qp->state != ECORE_ROCE_QP_STATE_INIT)) {
4121
4122                 attr.qp_state = IB_QPS_ERR;
4123                 attr_mask |= IB_QP_STATE;
4124
4125                 /* change the QP state to ERROR */
4126                 qlnxr_modify_qp(ibqp, &attr, attr_mask, NULL);
4127         }
4128
4129         if (qp->qp_type == IB_QPT_GSI)
4130                 qlnxr_destroy_gsi_qp(dev);
4131
4132         qp->sig = ~qp->sig;
4133
4134         qlnxr_free_qp_resources(dev, qp);
4135
4136         if (atomic_dec_and_test(&qp->refcnt)) {
4137                 /* TODO: only for iWARP? */
4138                 qlnxr_idr_remove(dev, qp->qp_id);
4139                 kfree(qp);
4140         }
4141
4142         QL_DPRINT12(ha, "exit\n");
4143         return rc;
4144 }
4145
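/*
 * A work queue is treated as full when advancing prod would make it equal to
 * cons, i.e. (prod + 1) % max_wr == cons (see the sizing comment in
 * qlnxr_create_kernel_qp()).
 */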
4146 static inline int
4147 qlnxr_wq_is_full(struct qlnxr_qp_hwq_info *wq)
4148 {
4149         return (((wq->prod + 1) % wq->max_wr) == wq->cons);
4150 }
4151
4152 static int
4153 sge_data_len(struct ib_sge *sg_list, int num_sge)
4154 {
4155         int i, len = 0;
4156         for (i = 0; i < num_sge; i++)
4157                 len += sg_list[i].length;
4158         return len;
4159 }
4160
4161 static void
4162 swap_wqe_data64(u64 *p)
4163 {
4164         int i;
4165
4166         for (i = 0; i < QLNXR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
4167                 *p = cpu_to_be64(cpu_to_le64(*p));
4168 }
4169
4170
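/*
 * Inline sends: the payload described by the WR's SG list is copied directly
 * into successive SQ chain elements instead of being referenced through SGEs;
 * each fully written element is passed through swap_wqe_data64() for the
 * device.
 */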
4171 static u32
4172 qlnxr_prepare_sq_inline_data(struct qlnxr_dev *dev,
4173         struct qlnxr_qp         *qp,
4174         u8                      *wqe_size,
4175         struct ib_send_wr       *wr,
4176         struct ib_send_wr       **bad_wr,
4177         u8                      *bits,
4178         u8                      bit)
4179 {
4180         int i, seg_siz;
4181         char *seg_prt, *wqe;
4182         u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
4183         qlnx_host_t     *ha;
4184
4185         ha = dev->ha;
4186
4187         QL_DPRINT12(ha, "enter[%d]\n", data_size);
4188
4189         if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
4190                 QL_DPRINT12(ha,
4191                         "Too much inline data in WR:[%d, %d]\n",
4192                         data_size, ROCE_REQ_MAX_INLINE_DATA_SIZE);
4193                 *bad_wr = wr;
4194                 return 0;
4195         }
4196
4197         if (!data_size)
4198                 return data_size;
4199
4200         /* set the bit */
4201         *bits |= bit;
4202
4203         seg_prt = wqe = NULL;
4204         seg_siz = 0;
4205
4206         /* copy data inline */
4207         for (i = 0; i < wr->num_sge; i++) {
4208                 u32 len = wr->sg_list[i].length;
4209                 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
4210
4211                 while (len > 0) {
4212                         u32 cur;
4213
4214                         /* new segment required */
4215                         if (!seg_siz) {
4216                                 wqe = (char *)ecore_chain_produce(&qp->sq.pbl);
4217                                 seg_prt = wqe;
4218                                 seg_siz = sizeof(struct rdma_sq_common_wqe);
4219                                 (*wqe_size)++;
4220                         }
4221
4222                         /* calculate currently allowed length */
4223                         cur = MIN(len, seg_siz);
4224
4225                         memcpy(seg_prt, src, cur);
4226
4227                         /* update segment variables */
4228                         seg_prt += cur;
4229                         seg_siz -= cur;
4230                         /* update sge variables */
4231                         src += cur;
4232                         len -= cur;
4233
4234                         /* swap fully-completed segments */
4235                         if (!seg_siz)
4236                                 swap_wqe_data64((u64 *)wqe);
4237                 }
4238         }
4239
4240         /* swap last not completed segment */
4241         if (seg_siz)
4242                 swap_wqe_data64((u64 *)wqe);
4243
4244         QL_DPRINT12(ha, "exit\n");
4245         return data_size;
4246 }
4247
4248 static u32
4249 qlnxr_prepare_sq_sges(struct qlnxr_dev *dev, struct qlnxr_qp *qp,
4250         u8 *wqe_size, struct ib_send_wr *wr)
4251 {
4252         int i;
4253         u32 data_size = 0;
4254         qlnx_host_t     *ha;
4255
4256         ha = dev->ha;
4257
4258         QL_DPRINT12(ha, "enter wr->num_sge = %d \n", wr->num_sge);
4259  
4260         for (i = 0; i < wr->num_sge; i++) {
4261                 struct rdma_sq_sge *sge = ecore_chain_produce(&qp->sq.pbl);
4262
4263                 TYPEPTR_ADDR_SET(sge, addr, wr->sg_list[i].addr);
4264                 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
4265                 sge->length = cpu_to_le32(wr->sg_list[i].length);
4266                 data_size += wr->sg_list[i].length;
4267         }
4268
4269         if (wqe_size)
4270                 *wqe_size += wr->num_sge;
4271
4272         QL_DPRINT12(ha, "exit data_size = %d\n", data_size);
4273         return data_size;
4274 }
4275
4276 static u32
4277 qlnxr_prepare_sq_rdma_data(struct qlnxr_dev *dev,
4278         struct qlnxr_qp *qp,
4279         struct rdma_sq_rdma_wqe_1st *rwqe,
4280         struct rdma_sq_rdma_wqe_2nd *rwqe2,
4281         struct ib_send_wr *wr,
4282         struct ib_send_wr **bad_wr)
4283 {
4284         qlnx_host_t     *ha;
4285         u32             ret = 0;
4286
4287         ha = dev->ha;
4288
4289         QL_DPRINT12(ha, "enter\n");
4290  
4291         rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
4292         TYPEPTR_ADDR_SET(rwqe2, remote_va, rdma_wr(wr)->remote_addr);
4293
4294         if (wr->send_flags & IB_SEND_INLINE) {
4295                 u8 flags = 0;
4296                 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
4297                 return qlnxr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size,
4298                                 wr, bad_wr, &rwqe->flags, flags);
4299         }
4300
4301         ret = qlnxr_prepare_sq_sges(dev, qp, &rwqe->wqe_size, wr);
4302
4303         QL_DPRINT12(ha, "exit ret = 0x%x\n", ret);
4304
4305         return (ret);
4306 }
4307
4308 static u32
4309 qlnxr_prepare_sq_send_data(struct qlnxr_dev *dev,
4310         struct qlnxr_qp *qp,
4311         struct rdma_sq_send_wqe *swqe,
4312         struct rdma_sq_send_wqe *swqe2,
4313         struct ib_send_wr *wr,
4314         struct ib_send_wr **bad_wr)
4315 {
4316         qlnx_host_t     *ha;
4317         u32             ret = 0;
4318
4319         ha = dev->ha;
4320
4321         QL_DPRINT12(ha, "enter\n");
4322  
4323         memset(swqe2, 0, sizeof(*swqe2));
4324
4325         if (wr->send_flags & IB_SEND_INLINE) {
4326                 u8 flags = 0;
4327                 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
4328                 return qlnxr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size,
4329                                 wr, bad_wr, &swqe->flags, flags);
4330         }
4331
4332         ret = qlnxr_prepare_sq_sges(dev, qp, &swqe->wqe_size, wr);
4333
4334         QL_DPRINT12(ha, "exit ret = 0x%x\n", ret);
4335
4336         return (ret);
4337 }
4338
4339 static void
4340 qlnx_handle_completed_mrs(struct qlnxr_dev *dev, struct mr_info *info)
4341 {
4342         qlnx_host_t     *ha;
4343         int             work;
4344
4345         ha = dev->ha;
4346         work = info->completed - info->completed_handled - 1;
4347
4348         QL_DPRINT12(ha, "enter [%d]\n", work);
4349  
4350         while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
4351                 struct qlnxr_pbl *pbl;
4352
4353                 /* Free all the page lists that can be freed (all the ones
4354                  * that were invalidated), under the assumption that if an
4355                  * FMR completed successfully, any invalidate operation
4356                  * posted before it has also completed.
4357                  */
4358                 pbl = list_first_entry(&info->inuse_pbl_list,
4359                                        struct qlnxr_pbl,
4360                                        list_entry);
4361                 list_del(&pbl->list_entry);
4362                 list_add_tail(&pbl->list_entry, &info->free_pbl_list);
4363                 info->completed_handled++;
4364         }
4365
4366         QL_DPRINT12(ha, "exit\n");
4367         return;
4368 }
4369
4370 #if __FreeBSD_version >= 1102000
4371
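/*
 * Memory-registration WQEs span two chain elements: the first carries the
 * I/O virtual address and l_key, the second the access control bits, the
 * page-size log, the length and the physical address of the PBL table.
 */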
4372 static int qlnxr_prepare_reg(struct qlnxr_qp *qp,
4373                 struct rdma_sq_fmr_wqe_1st *fwqe1,
4374                 struct ib_reg_wr *wr)
4375 {
4376         struct qlnxr_mr *mr = get_qlnxr_mr(wr->mr);
4377         struct rdma_sq_fmr_wqe_2nd *fwqe2;
4378
4379         fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)ecore_chain_produce(&qp->sq.pbl);
4380         fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
4381         fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
4382         fwqe1->l_key = wr->key;
4383
4384         fwqe2->access_ctrl = 0;
4385
4386         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
4387                 !!(wr->access & IB_ACCESS_REMOTE_READ));
4388         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
4389                 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
4390         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
4391                 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
4392         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
4393         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
4394                 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
4395         fwqe2->fmr_ctrl = 0;
4396
4397         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
4398                 ilog2(mr->ibmr.page_size) - 12);
4399
4400         fwqe2->length_hi = 0; /* TODO - figure out why length is only 32bit.. */
4401         fwqe2->length_lo = mr->ibmr.length;
4402         fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
4403         fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
4404
4405         qp->wqe_wr_id[qp->sq.prod].mr = mr;
4406
4407         return 0;
4408 }
4409
4410 #else
4411
4412 static void
4413 build_frmr_pbes(struct qlnxr_dev *dev, struct ib_send_wr *wr,
4414         struct mr_info *info)
4415 {
4416         int i;
4417         u64 buf_addr = 0;
4418         int num_pbes, total_num_pbes = 0;
4419         struct regpair *pbe;
4420         struct qlnxr_pbl *pbl_tbl = info->pbl_table;
4421         struct qlnxr_pbl_info *pbl_info = &info->pbl_info;
4422         qlnx_host_t     *ha;
4423
4424         ha = dev->ha;
4425
4426         QL_DPRINT12(ha, "enter\n");
4427  
4428         pbe = (struct regpair *)pbl_tbl->va;
4429         num_pbes = 0;
4430
4431         for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
4432                 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
4433                 pbe->lo = cpu_to_le32((u32)buf_addr);
4434                 pbe->hi = cpu_to_le32((u32)upper_32_bits(buf_addr));
4435
4436                 num_pbes += 1;
4437                 pbe++;
4438                 total_num_pbes++;
4439
4440                 if (total_num_pbes == pbl_info->num_pbes)
4441                         return;
4442
4443                 /* if the given pbl is full storing the pbes,
4444                  * move to next pbl.
4445                  */
4446                 if (num_pbes ==
4447                     (pbl_info->pbl_size / sizeof(u64))) {
4448                         pbl_tbl++;
4449                         pbe = (struct regpair *)pbl_tbl->va;
4450                         num_pbes = 0;
4451                 }
4452         }
4453         QL_DPRINT12(ha, "exit\n");
4454
4455         return;
4456 }
4457
4458 static int
4459 qlnxr_prepare_safe_pbl(struct qlnxr_dev *dev, struct mr_info *info)
4460 {
4461         int rc = 0;
4462         qlnx_host_t     *ha;
4463
4464         ha = dev->ha;
4465
4466         QL_DPRINT12(ha, "enter\n");
4467  
4468         if (info->completed == 0) {
4469                 //DP_VERBOSE(dev, QLNXR_MSG_MR, "First FMR\n");
4470                 /* first fmr */
4471                 return 0;
4472         }
4473
4474         qlnx_handle_completed_mrs(dev, info);
4475
4476         list_add_tail(&info->pbl_table->list_entry, &info->inuse_pbl_list);
4477
4478         if (list_empty(&info->free_pbl_list)) {
4479                 info->pbl_table = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info,
4480                                                           GFP_ATOMIC);
4481         } else {
4482                 info->pbl_table = list_first_entry(&info->free_pbl_list,
4483                                         struct qlnxr_pbl,
4484                                         list_entry);
4485                 list_del(&info->pbl_table->list_entry);
4486         }
4487
4488         if (!info->pbl_table)
4489                 rc = -ENOMEM;
4490
4491         QL_DPRINT12(ha, "exit\n");
4492         return rc;
4493 }
4494
4495 static inline int
4496 qlnxr_prepare_fmr(struct qlnxr_qp *qp,
4497         struct rdma_sq_fmr_wqe_1st *fwqe1,
4498         struct ib_send_wr *wr)
4499 {
4500         struct qlnxr_dev *dev = qp->dev;
4501         u64 fbo;
4502         struct qlnxr_fast_reg_page_list *frmr_list =
4503                 get_qlnxr_frmr_list(wr->wr.fast_reg.page_list);
4504         struct rdma_sq_fmr_wqe *fwqe2 =
4505                 (struct rdma_sq_fmr_wqe *)ecore_chain_produce(&qp->sq.pbl);
4506         int rc = 0;
4507         qlnx_host_t     *ha;
4508
4509         ha = dev->ha;
4510
4511         QL_DPRINT12(ha, "enter\n");
4512  
4513         if (wr->wr.fast_reg.page_list_len == 0)
4514                 BUG();
4515
4516         rc = qlnxr_prepare_safe_pbl(dev, &frmr_list->info);
4517         if (rc)
4518                 return rc;
4519
4520         fwqe1->addr.hi = upper_32_bits(wr->wr.fast_reg.iova_start);
4521         fwqe1->addr.lo = lower_32_bits(wr->wr.fast_reg.iova_start);
4522         fwqe1->l_key = wr->wr.fast_reg.rkey;
4523
4524         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_REMOTE_READ,
4525                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ));
4526         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_REMOTE_WRITE,
4527                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE));
4528         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_ENABLE_ATOMIC,
4529                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_ATOMIC));
4530         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_LOCAL_READ, 1);
4531         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_LOCAL_WRITE,
4532                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE));
4533
4534         fwqe2->fmr_ctrl = 0;
4535
4536         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
4537                    ilog2(1 << wr->wr.fast_reg.page_shift) - 12);
4538         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_ZERO_BASED, 0);
4539
4540         fwqe2->length_hi = 0; /* Todo - figure this out... why length is only 32bit.. */
4541         fwqe2->length_lo = wr->wr.fast_reg.length;
4542         fwqe2->pbl_addr.hi = upper_32_bits(frmr_list->info.pbl_table->pa);
4543         fwqe2->pbl_addr.lo = lower_32_bits(frmr_list->info.pbl_table->pa);
4544
4545         /* produce another wqe for fwqe3 */
4546         ecore_chain_produce(&qp->sq.pbl);
4547
4548         fbo = wr->wr.fast_reg.iova_start -
4549             (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
4550
4551         QL_DPRINT12(ha, "wr.fast_reg.iova_start = %p rkey=%x addr=%x:%x"
4552                 " length = %x pbl_addr %x:%x\n",
4553                 wr->wr.fast_reg.iova_start, wr->wr.fast_reg.rkey,
4554                 fwqe1->addr.hi, fwqe1->addr.lo, fwqe2->length_lo,
4555                 fwqe2->pbl_addr.hi, fwqe2->pbl_addr.lo);
4556
4557         build_frmr_pbes(dev, wr, &frmr_list->info);
4558
4559         qp->wqe_wr_id[qp->sq.prod].frmr = frmr_list;
4560
4561         QL_DPRINT12(ha, "exit\n");
4562         return 0;
4563 }
4564
4565 #endif /* #if __FreeBSD_version >= 1102000 */
4566
4567 static enum ib_wc_opcode
4568 qlnxr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
4569 {
4570         switch (opcode) {
4571         case IB_WR_RDMA_WRITE:
4572         case IB_WR_RDMA_WRITE_WITH_IMM:
4573                 return IB_WC_RDMA_WRITE;
4574         case IB_WR_SEND_WITH_IMM:
4575         case IB_WR_SEND:
4576         case IB_WR_SEND_WITH_INV:
4577                 return IB_WC_SEND;
4578         case IB_WR_RDMA_READ:
4579                 return IB_WC_RDMA_READ;
4580         case IB_WR_ATOMIC_CMP_AND_SWP:
4581                 return IB_WC_COMP_SWAP;
4582         case IB_WR_ATOMIC_FETCH_AND_ADD:
4583                 return IB_WC_FETCH_ADD;
4584
4585 #if __FreeBSD_version >= 1102000
4586         case IB_WR_REG_MR:
4587                 return IB_WC_REG_MR;
4588 #else
4589         case IB_WR_FAST_REG_MR:
4590                 return IB_WC_FAST_REG_MR;
4591 #endif /* #if __FreeBSD_version >= 1102000 */
4592
4593         case IB_WR_LOCAL_INV:
4594                 return IB_WC_LOCAL_INV;
4595         default:
4596                 return IB_WC_SEND;
4597         }
4598 }
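     /*
      * Sanity checks before posting a send WR: reject the WR if it carries
      * more SGEs than the SQ supports, if the software SQ ring is full, or
      * if the SQ PBL chain has no room for a maximum-size WQE.  Each
      * condition is reported only once per QP via qp->err_bitmap.
      */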
4599 static inline bool
4600 qlnxr_can_post_send(struct qlnxr_qp *qp, struct ib_send_wr *wr)
4601 {
4602         int wq_is_full, err_wr, pbl_is_full;
4603         struct qlnxr_dev *dev = qp->dev;
4604         qlnx_host_t     *ha;
4605
4606         ha = dev->ha;
4607
4608         QL_DPRINT12(ha, "enter[qp, wr] = [%p,%p]\n", qp, wr);
4609  
4610         /* prevent SQ overflow and/or processing of a bad WR */
4611         err_wr = wr->num_sge > qp->sq.max_sges;
4612         wq_is_full = qlnxr_wq_is_full(&qp->sq);
4613         pbl_is_full = ecore_chain_get_elem_left_u32(&qp->sq.pbl) <
4614                       QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
4615         if (wq_is_full || err_wr || pbl_is_full) {
4616                 if (wq_is_full &&
4617                     !(qp->err_bitmap & QLNXR_QP_ERR_SQ_FULL)) {
4618
4619                         qp->err_bitmap |= QLNXR_QP_ERR_SQ_FULL;
4620
4621                         QL_DPRINT12(ha,
4622                                 "error: WQ is full. Post send on QP failed"
4623                                 " (this error appears only once) "
4624                                 "[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4625                                 qp, wr, qp->err_bitmap);
4626                 }
4627
4628                 if (err_wr &&
4629                     !(qp->err_bitmap & QLNXR_QP_ERR_BAD_SR)) {
4630
4631                         qp->err_bitmap |= QLNXR_QP_ERR_BAD_SR;
4632
4633                         QL_DPRINT12(ha,
4634                                 "error: WQ is bad. Post send on QP failed"
4635                                 " (this error appears only once) "
4636                                 "[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4637                                 qp, wr, qp->err_bitmap);
4638                 }
4639
4640                 if (pbl_is_full &&
4641                     !(qp->err_bitmap & QLNXR_QP_ERR_SQ_PBL_FULL)) {
4642
4643                         qp->err_bitmap |= QLNXR_QP_ERR_SQ_PBL_FULL;
4644
4645                         QL_DPRINT12(ha,
4646                                 "error: WQ PBL is full. Post send on QP failed"
4647                                 " (this error appears only once) "
4648                                 "[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4649                                 qp, wr, qp->err_bitmap);
4650                 }
4651                 return false;
4652         }
4653         QL_DPRINT12(ha, "exit[qp, wr] = [%p,%p]\n", qp, wr);
4654         return true;
4655 }
4656
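     /*
      * qlnxr_post_send() walks the WR chain, builds the opcode-specific
      * WQEs on the SQ PBL chain, rolls the producer back if a WR fails,
      * and finally rings the SQ doorbell once for the whole chain.
      * GSI QPs are handed off to qlnxr_gsi_post_send().
      */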
4657 int
4658 qlnxr_post_send(struct ib_qp *ibqp,
4659         struct ib_send_wr *wr,
4660         struct ib_send_wr **bad_wr)
4661 {
4662         struct qlnxr_dev        *dev = get_qlnxr_dev(ibqp->device);
4663         struct qlnxr_qp         *qp = get_qlnxr_qp(ibqp);
4664         unsigned long           flags;
4665         int                     status = 0, rc = 0;
4666         bool                    comp;
4667         qlnx_host_t             *ha;
4668         uint32_t                reg_addr;
4669  
4670         *bad_wr = NULL;
4671         ha = dev->ha;
4672
4673         QL_DPRINT12(ha, "enter[ibqp, wr, bad_wr] = [%p, %p, %p]\n",
4674                 ibqp, wr, bad_wr);
4675
4676         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
4677                 return -EINVAL;
4678
4679         if (qp->qp_type == IB_QPT_GSI)
4680                 return qlnxr_gsi_post_send(ibqp, wr, bad_wr);
4681
4682         spin_lock_irqsave(&qp->q_lock, flags);
4683
4684         if (QLNX_IS_ROCE(dev) && (qp->state != ECORE_ROCE_QP_STATE_RTS) &&
4685             (qp->state != ECORE_ROCE_QP_STATE_ERR) &&
4686             (qp->state != ECORE_ROCE_QP_STATE_SQD)) {
4687                 spin_unlock_irqrestore(&qp->q_lock, flags);
4688                 *bad_wr = wr;
4689                 QL_DPRINT11(ha, "QP in wrong state! QP icid=0x%x state %d\n",
4690                         qp->icid, qp->state);
4691                 return -EINVAL;
4692         }
4693
4694         if (!wr) {
4695                 QL_DPRINT11(ha, "Got an empty post send???\n");
4696         }
4697
4698         while (wr) {
4699                 struct rdma_sq_common_wqe       *wqe;
4700                 struct rdma_sq_send_wqe         *swqe;
4701                 struct rdma_sq_send_wqe         *swqe2;
4702                 struct rdma_sq_rdma_wqe_1st     *rwqe;
4703                 struct rdma_sq_rdma_wqe_2nd     *rwqe2;
4704                 struct rdma_sq_local_inv_wqe    *iwqe;
4705                 struct rdma_sq_atomic_wqe       *awqe1;
4706                 struct rdma_sq_atomic_wqe       *awqe2;
4707                 struct rdma_sq_atomic_wqe       *awqe3;
4708                 struct rdma_sq_fmr_wqe_1st      *fwqe1;
4709
4710                 if (!qlnxr_can_post_send(qp, wr)) {
4711                         status = -ENOMEM;
4712                         *bad_wr = wr;
4713                         break;
4714                 }
4715
4716                 wqe = ecore_chain_produce(&qp->sq.pbl);
4717
4718                 qp->wqe_wr_id[qp->sq.prod].signaled =
4719                         !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
4720
4721                 /* common fields */
4722                 wqe->flags = 0;
4723                 wqe->flags |= (RDMA_SQ_SEND_WQE_COMP_FLG_MASK <<
4724                                 RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT);
4725
4726                 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG, \
4727                         !!(wr->send_flags & IB_SEND_SOLICITED));
4728
4729                 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) ||
4730                                 (qp->signaled);
4731
4732                 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
4733                 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,  \
4734                         !!(wr->send_flags & IB_SEND_FENCE));
4735
4736                 wqe->prev_wqe_size = qp->prev_wqe_size;
4737
4738                 qp->wqe_wr_id[qp->sq.prod].opcode = qlnxr_ib_to_wc_opcode(wr->opcode);
4739
4740
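                     /*
                      * Opcode-specific part of the WQE: each case may produce
                      * additional elements from the SQ PBL chain and records
                      * the WQE size so a failed WR can be rolled back below.
                      */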
4741                 switch (wr->opcode) {
4742
4743                 case IB_WR_SEND_WITH_IMM:
4744
4745                         wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
4746                         swqe = (struct rdma_sq_send_wqe *)wqe;
4747                         swqe->wqe_size = 2;
4748                         swqe2 = (struct rdma_sq_send_wqe *)
4749                                         ecore_chain_produce(&qp->sq.pbl);
4750                         swqe->inv_key_or_imm_data =
4751                                 cpu_to_le32(wr->ex.imm_data);
4752                         swqe->length = cpu_to_le32(
4753                                                 qlnxr_prepare_sq_send_data(dev,
4754                                                         qp, swqe, swqe2, wr,
4755                                                         bad_wr));
4756
4757                         qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4758                         qp->prev_wqe_size = swqe->wqe_size;
4759                         qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4760
4761                         QL_DPRINT12(ha, "SEND w/ IMM length = %d imm data=%x\n",
4762                                 swqe->length, wr->ex.imm_data);
4763
4764                         break;
4765
4766                 case IB_WR_SEND:
4767
4768                         wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
4769                         swqe = (struct rdma_sq_send_wqe *)wqe;
4770
4771                         swqe->wqe_size = 2;
4772                         swqe2 = (struct rdma_sq_send_wqe *)
4773                                         ecore_chain_produce(&qp->sq.pbl);
4774                         swqe->length = cpu_to_le32(
4775                                                 qlnxr_prepare_sq_send_data(dev,
4776                                                         qp, swqe, swqe2, wr,
4777                                                         bad_wr));
4778                         qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4779                         qp->prev_wqe_size = swqe->wqe_size;
4780                         qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4781
4782                         QL_DPRINT12(ha, "SEND w/o IMM length = %d\n",
4783                                 swqe->length);
4784
4785                         break;
4786
4787                 case IB_WR_SEND_WITH_INV:
4788
4789                         wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
4790                         swqe = (struct rdma_sq_send_wqe *)wqe;
4791                         swqe2 = (struct rdma_sq_send_wqe *)
4792                                         ecore_chain_produce(&qp->sq.pbl);
4793                         swqe->wqe_size = 2;
4794                         swqe->inv_key_or_imm_data =
4795                                 cpu_to_le32(wr->ex.invalidate_rkey);
4796                         swqe->length = cpu_to_le32(qlnxr_prepare_sq_send_data(dev,
4797                                                 qp, swqe, swqe2, wr, bad_wr));
4798                         qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4799                         qp->prev_wqe_size = swqe->wqe_size;
4800                         qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4801
4802                         QL_DPRINT12(ha, "SEND w INVALIDATE length = %d\n",
4803                                 swqe->length);
4804                         break;
4805
4806                 case IB_WR_RDMA_WRITE_WITH_IMM:
4807
4808                         wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
4809                         rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4810
4811                         rwqe->wqe_size = 2;
4812                         rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
4813                         rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4814                                         ecore_chain_produce(&qp->sq.pbl);
4815                         rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4816                                                 qp, rwqe, rwqe2, wr, bad_wr));
4817                         qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4818                         qp->prev_wqe_size = rwqe->wqe_size;
4819                         qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4820
4821                         QL_DPRINT12(ha,
4822                                 "RDMA WRITE w/ IMM length = %d imm data=%x\n",
4823                                 rwqe->length, rwqe->imm_data);
4824
4825                         break;
4826
4827                 case IB_WR_RDMA_WRITE:
4828
4829                         wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
4830                         rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4831
4832                         rwqe->wqe_size = 2;
4833                         rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4834                                         ecore_chain_produce(&qp->sq.pbl);
4835                         rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4836                                                 qp, rwqe, rwqe2, wr, bad_wr));
4837                         qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4838                         qp->prev_wqe_size = rwqe->wqe_size;
4839                         qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4840
4841                         QL_DPRINT12(ha,
4842                                 "RDMA WRITE w/o IMM length = %d\n",
4843                                 rwqe->length);
4844
4845                         break;
4846
4847                 case IB_WR_RDMA_READ_WITH_INV:
4848
4849                         QL_DPRINT12(ha,
4850                                 "RDMA READ WITH INVALIDATE not supported\n");
4851
4852                         *bad_wr = wr;
4853                         rc = -EINVAL;
4854
4855                         break;
4856
4857                 case IB_WR_RDMA_READ:
4858
4859                         wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
4860                         rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4861
4862                         rwqe->wqe_size = 2;
4863                         rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4864                                         ecore_chain_produce(&qp->sq.pbl);
4865                         rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4866                                                 qp, rwqe, rwqe2, wr, bad_wr));
4867
4868                         qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4869                         qp->prev_wqe_size = rwqe->wqe_size;
4870                         qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4871
4872                         QL_DPRINT12(ha, "RDMA READ length = %d\n",
4873                                 rwqe->length);
4874
4875                         break;
4876
4877                 case IB_WR_ATOMIC_CMP_AND_SWP:
4878                 case IB_WR_ATOMIC_FETCH_AND_ADD:
4879
4880                         QL_DPRINT12(ha,
4881                                 "ATOMIC operation = %s\n",
4882                                 ((wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) ?
4883                                         "IB_WR_ATOMIC_CMP_AND_SWP" : 
4884                                         "IB_WR_ATOMIC_FETCH_AND_ADD"));
4885
4886                         awqe1 = (struct rdma_sq_atomic_wqe *)wqe;
4887                         awqe1->prev_wqe_size = 4;
4888
4889                         awqe2 = (struct rdma_sq_atomic_wqe *)
4890                                         ecore_chain_produce(&qp->sq.pbl);
4891
4892                         TYPEPTR_ADDR_SET(awqe2, remote_va, \
4893                                 atomic_wr(wr)->remote_addr);
4894
4895                         awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
4896
4897                         awqe3 = (struct rdma_sq_atomic_wqe *)
4898                                         ecore_chain_produce(&qp->sq.pbl);
4899
4900                         if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
4901                                 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
4902                                 TYPEPTR_ADDR_SET(awqe3, swap_data,
4903                                                  atomic_wr(wr)->compare_add);
4904                         } else {
4905                                 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
4906                                 TYPEPTR_ADDR_SET(awqe3, swap_data,
4907                                                  atomic_wr(wr)->swap);
4908                                 TYPEPTR_ADDR_SET(awqe3, cmp_data,
4909                                                  atomic_wr(wr)->compare_add);
4910                         }
4911
4912                         qlnxr_prepare_sq_sges(dev, qp, NULL, wr);
4913
4914                         qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->prev_wqe_size;
4915                         qp->prev_wqe_size = awqe1->prev_wqe_size;
4916
4917                         break;
4918
4919                 case IB_WR_LOCAL_INV:
4920
4921                         QL_DPRINT12(ha,
4922                                 "INVALIDATE length (IB_WR_LOCAL_INV)\n");
4923
4924                         iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
4925                         iwqe->prev_wqe_size = 1;
4926
4927                         iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
4928                         iwqe->inv_l_key = wr->ex.invalidate_rkey;
4929                         qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->prev_wqe_size;
4930                         qp->prev_wqe_size = iwqe->prev_wqe_size;
4931
4932                         break;
4933
4934 #if __FreeBSD_version >= 1102000
4935
4936                 case IB_WR_REG_MR:
4937
4938                         QL_DPRINT12(ha, "IB_WR_REG_MR\n");
4939
4940                         wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
4941                         fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
4942                         fwqe1->wqe_size = 2;
4943
4944                         rc = qlnxr_prepare_reg(qp, fwqe1, reg_wr(wr));
4945                         if (rc) {
4946                                 QL_DPRINT11(ha, "IB_WR_REG_MR failed rc=%d\n", rc);
4947                                 *bad_wr = wr;
4948                                 break;
4949                         }
4950
4951                         qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
4952                         qp->prev_wqe_size = fwqe1->wqe_size;
4953
4954                         break;
4955 #else
4956                 case IB_WR_FAST_REG_MR:
4957
4958                         QL_DPRINT12(ha, "FAST_MR (IB_WR_FAST_REG_MR)\n");
4959
4960                         wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
4961                         fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
4962                         fwqe1->prev_wqe_size = 3;
4963
4964                         rc = qlnxr_prepare_fmr(qp, fwqe1, wr);
4965
4966                         if (rc) {
4967                                 QL_DPRINT12(ha,
4968                                         "FAST_MR (IB_WR_FAST_REG_MR) failed"
4969                                         " rc = %d\n", rc);
4970                                 *bad_wr = wr;
4971                                 break;
4972                         }
4973
4974                         qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->prev_wqe_size;
4975                         qp->prev_wqe_size = fwqe1->prev_wqe_size;
4976
4977                         break;
4978 #endif /* #if __FreeBSD_version >= 1102000 */
4979
4980                 default:
4981
4982                         QL_DPRINT12(ha, "Invalid Opcode 0x%x!\n", wr->opcode);
4983
4984                         rc = -EINVAL;
4985                         *bad_wr = wr;
4986                         break;
4987                 }
4988
4989                 if (*bad_wr) {
4990                         /*
4991                          * restore prod to its position before this WR was processed
4992                          */
4993                         ecore_chain_set_prod(&qp->sq.pbl,
4994                              le16_to_cpu(qp->sq.db_data.data.value),
4995                              wqe);
4996                         /* restore prev_wqe_size */
4997                         qp->prev_wqe_size = wqe->prev_wqe_size;
4998                         status = rc;
4999
5000                         QL_DPRINT12(ha, "failed *bad_wr = %p\n", *bad_wr);
5001                         break; /* out of the loop */
5002                 }
5003
5004                 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
5005
5006                 qlnxr_inc_sw_prod(&qp->sq);
5007
5008                 qp->sq.db_data.data.value++;
5009
5010                 wr = wr->next;
5011         }
5012
5013         /* Trigger doorbell
5014          * If there was a failure in the first WR then it will be triggered in
5015          * vain. However, this is not harmful (as long as the producer value is
5016          * unchanged). For performance reasons we avoid checking for this
5017          * redundant doorbell.
5018          */
5019         wmb();
5020         //writel(qp->sq.db_data.raw, qp->sq.db);
5021
5022         reg_addr = (uint32_t)((uint8_t *)qp->sq.db - (uint8_t *)ha->cdev.doorbells);
5023         bus_write_4(ha->pci_dbells, reg_addr, qp->sq.db_data.raw);
5024         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5025
5026         mmiowb();
5027
5028         spin_unlock_irqrestore(&qp->q_lock, flags);
5029
5030         QL_DPRINT12(ha, "exit[ibqp, wr, bad_wr] = [%p, %p, %p]\n",
5031                 ibqp, wr, bad_wr);
5032
5033         return status;
5034 }
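     /*
      * Illustrative only (not part of the driver): a ULP would typically
      * reach qlnxr_post_send() through the ibcore API, roughly as in the
      * sketch below; 'ibqp', 'buf_dma', 'len', 'lkey' and 'handle_error'
      * are hypothetical caller-side names.
      *
      *      struct ib_sge sge = { .addr = buf_dma, .length = len, .lkey = lkey };
      *      struct ib_send_wr wr = { 0 }, *bad_wr;
      *
      *      wr.opcode = IB_WR_SEND;
      *      wr.send_flags = IB_SEND_SIGNALED;
      *      wr.sg_list = &sge;
      *      wr.num_sge = 1;
      *      if (ib_post_send(ibqp, &wr, &bad_wr))
      *              handle_error(bad_wr);
      */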
5035
5036 static u32
5037 qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq)
5038 {
5039         u32 used;
5040
5041         /* Calculate the number of elements used, based on the producer
5042          * and consumer counts, and subtract it from the maximum number of
5043          * work requests supported to get the number of elements left.
5044          */
5045         used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
5046
5047         return hw_srq->max_wr - used;
5048 }
5049
5050
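     /*
      * qlnxr_post_recv() posts one RQ entry per WR: it writes one
      * rdma_rq_sge per SGE (the first carrying the SGE count), posts a
      * single zero-length SGE when the WR has none, and rings the RQ
      * doorbell (plus the second iWARP doorbell where applicable) after
      * each WR.
      */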
5051 int
5052 qlnxr_post_recv(struct ib_qp *ibqp,
5053         struct ib_recv_wr *wr,
5054         struct ib_recv_wr **bad_wr)
5055 {
5056         struct qlnxr_qp         *qp = get_qlnxr_qp(ibqp);
5057         struct qlnxr_dev        *dev = qp->dev;
5058         unsigned long           flags;
5059         int                     status = 0;
5060         qlnx_host_t             *ha;
5061         uint32_t                reg_addr;
5062
5063         ha = dev->ha;
5064
5065         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5066                 return -EINVAL;
5067
5068         QL_DPRINT12(ha, "enter\n");
5069  
5070         if (qp->qp_type == IB_QPT_GSI) {
5071                 QL_DPRINT12(ha, "(qp->qp_type = IB_QPT_GSI)\n");
5072                 return qlnxr_gsi_post_recv(ibqp, wr, bad_wr);
5073         }
5074
5075         if (qp->srq) {
5076                 QL_DPRINT11(ha, "qp->srq [%p]"
5077                         " QP is associated with SRQ, cannot post RQ buffers\n",
5078                         qp->srq);
5079                 return -EINVAL;
5080         }
5081
5082         spin_lock_irqsave(&qp->q_lock, flags);
5083
5084         if (qp->state == ECORE_ROCE_QP_STATE_RESET) {
5085                 spin_unlock_irqrestore(&qp->q_lock, flags);
5086                 *bad_wr = wr;
5087
5088                 QL_DPRINT11(ha, "qp->qp_type = ECORE_ROCE_QP_STATE_RESET\n");
5089
5090                 return -EINVAL;
5091         }
5092
5093         while (wr) {
5094                 int i;
5095
5096                 if ((ecore_chain_get_elem_left_u32(&qp->rq.pbl) <
5097                         QLNXR_MAX_RQE_ELEMENTS_PER_RQE) ||
5098                         (wr->num_sge > qp->rq.max_sges)) {
5099                         status = -ENOMEM;
5100                         *bad_wr = wr;
5101                         break;
5102                 }
5103                 for (i = 0; i < wr->num_sge; i++) {
5104                         u32 flags = 0;
5105                         struct rdma_rq_sge *rqe = ecore_chain_produce(&qp->rq.pbl);
5106
5107                         /* first one must include the number of SGE in the list */
5108                         if (!i)
5109                                 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, wr->num_sge);
5110
5111                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, wr->sg_list[i].lkey);
5112
5113                         RQ_SGE_SET(rqe, wr->sg_list[i].addr, \
5114                                 wr->sg_list[i].length, flags);
5115                 }
5116                 /* Special case of no SGEs. The FW requires between 1 and 4 SGEs;
5117                  * in this case we need to post one SGE with length zero, because
5118                  * an RDMA write with immediate still consumes an RQ entry. */
5119                 if (!wr->num_sge) {
5120                         u32 flags = 0;
5121                         struct rdma_rq_sge *rqe = ecore_chain_produce(&qp->rq.pbl);
5122
5123                         /* first one must include the number of SGE in the list */
5124                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
5125                         SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
5126
5127                         //RQ_SGE_SET(rqe, 0, 0, flags);
5128                         rqe->addr.hi = 0;
5129                         rqe->addr.lo = 0;
5130
5131                         rqe->length = 0;
5132                         rqe->flags = cpu_to_le32(flags);
5133
5134                         i = 1;
5135                 }
5136
5137                 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
5138                 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
5139
5140                 qlnxr_inc_sw_prod(&qp->rq);
5141
5142                 wmb();
5143
5144                 qp->rq.db_data.data.value++;
5145
5146         //      writel(qp->rq.db_data.raw, qp->rq.db);
5147                 mmiowb();
5148         //      if (QLNX_IS_IWARP(dev)) {
5149         //              writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
5150         //              mmiowb(); /* for second doorbell */
5151         //      }
5152
5153                 reg_addr = (uint32_t)((uint8_t *)qp->rq.db -
5154                                 (uint8_t *)ha->cdev.doorbells);
5155
5156                 bus_write_4(ha->pci_dbells, reg_addr, qp->rq.db_data.raw);
5157                 bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5158
5159                 if (QLNX_IS_IWARP(dev)) {
5160                         reg_addr = (uint32_t)((uint8_t *)qp->rq.iwarp_db2 -
5161                                                 (uint8_t *)ha->cdev.doorbells);
5162                         bus_write_4(ha->pci_dbells, reg_addr, \
5163                                 qp->rq.iwarp_db2_data.raw);
5164                         bus_barrier(ha->pci_dbells,  0, 0, \
5165                                 BUS_SPACE_BARRIER_READ);
5166                 }
5167
5168                 wr = wr->next;
5169         }
5170
5171         spin_unlock_irqrestore(&qp->q_lock, flags);
5172
5173         QL_DPRINT12(ha, "exit status = 0x%x\n", status);
5174
5175         return status;
5176 }
5177
5178 /* For FMR we need to increment the "completed" counter used by the FMR
5179  * algorithm that determines whether a PBL can be freed.
5180  * We need to do this whether or not the work request was signaled; for
5181  * this purpose we call this function from the condition that checks if a WR
5182  * should be skipped, to make sure we don't miss it (possibly this FMR
5183  * operation was not signaled).
5184  */
5185 static inline void
5186 qlnxr_chk_if_fmr(struct qlnxr_qp *qp)
5187 {
5188 #if __FreeBSD_version >= 1102000
5189
5190         if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
5191                 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
5192 #else
5193         if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_FAST_REG_MR)
5194                 qp->wqe_wr_id[qp->sq.cons].frmr->info.completed++;
5195
5196 #endif /* #if __FreeBSD_version >= 1102000 */
5197 }
5198
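     /*
      * process_req() walks SQ WQEs up to 'hw_cons', skipping unsignaled
      * WQEs unless 'force' is set, fills one ib_wc per reported WQE and
      * consumes the corresponding SQ PBL elements.
      */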
5199 static int
5200 process_req(struct qlnxr_dev *dev,
5201         struct qlnxr_qp *qp,
5202         struct qlnxr_cq *cq,
5203         int num_entries,
5204         struct ib_wc *wc,
5205         u16 hw_cons,
5206         enum ib_wc_status status,
5207         int force)
5208 {
5209         u16             cnt = 0;
5210         qlnx_host_t     *ha = dev->ha;
5211
5212         QL_DPRINT12(ha, "enter\n");
5213  
5214         while (num_entries && qp->sq.wqe_cons != hw_cons) {
5215                 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
5216                         qlnxr_chk_if_fmr(qp);
5217                         /* skip WC */
5218                         goto next_cqe;
5219                 }
5220
5221                 /* fill WC */
5222                 wc->status = status;
5223                 wc->vendor_err = 0;
5224                 wc->wc_flags = 0;
5225                 wc->src_qp = qp->id;
5226                 wc->qp = &qp->ibqp;
5227
5228                 // common section
5229                 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
5230                 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
5231
5232                 switch (wc->opcode) {
5233
5234                 case IB_WC_RDMA_WRITE:
5235
5236                         wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
5237
5238                         QL_DPRINT12(ha,
5239                                 "opcode = IB_WC_RDMA_WRITE bytes = %d\n",
5240                                 qp->wqe_wr_id[qp->sq.cons].bytes_len);
5241                         break;
5242
5243                 case IB_WC_COMP_SWAP:
5244                 case IB_WC_FETCH_ADD:
5245                         wc->byte_len = 8;
5246                         break;
5247
5248 #if __FreeBSD_version >= 1102000
5249                 case IB_WC_REG_MR:
5250                         qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
5251                         break;
5252 #else
5253                 case IB_WC_FAST_REG_MR:
5254                         qp->wqe_wr_id[qp->sq.cons].frmr->info.completed++;
5255                         break;
5256 #endif /* #if __FreeBSD_version >= 1102000 */
5257
5258                 case IB_WC_RDMA_READ:
5259                 case IB_WC_SEND:
5260
5261                         QL_DPRINT12(ha, "opcode = 0x%x \n", wc->opcode);
5262                         break;
5263                 default:
5264                         ;//DP_ERR("TBD ERROR");
5265                 }
5266
5267                 num_entries--;
5268                 wc++;
5269                 cnt++;
5270 next_cqe:
5271                 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
5272                         ecore_chain_consume(&qp->sq.pbl);
5273                 qlnxr_inc_sw_cons(&qp->sq);
5274         }
5275
5276         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5277         return cnt;
5278 }
5279
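     /*
      * Requester-side CQE handling: on success complete WQEs up to sq_cons;
      * on a flush error complete them as flushed; on any other error
      * complete the preceding WQEs as successful and, if there is room,
      * report the failing WQE with the mapped error status.
      */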
5280 static int
5281 qlnxr_poll_cq_req(struct qlnxr_dev *dev,
5282         struct qlnxr_qp *qp,
5283         struct qlnxr_cq *cq,
5284         int num_entries,
5285         struct ib_wc *wc,
5286         struct rdma_cqe_requester *req)
5287 {
5288         int             cnt = 0;
5289         qlnx_host_t     *ha = dev->ha;
5290
5291         QL_DPRINT12(ha, "enter req->status = 0x%x\n", req->status);
5292  
5293         switch (req->status) {
5294
5295         case RDMA_CQE_REQ_STS_OK:
5296
5297                 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
5298                         IB_WC_SUCCESS, 0);
5299                 break;
5300
5301         case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
5302
5303                 if (qp->state != ECORE_ROCE_QP_STATE_ERR)
5304                         cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
5305                                           IB_WC_WR_FLUSH_ERR, 1);
5306                 break;
5307
5308         default: /* other error cases */
5309
5310                 /* process all WQEs before the consumer */
5311                 qp->state = ECORE_ROCE_QP_STATE_ERR;
5312                 cnt = process_req(dev, qp, cq, num_entries, wc,
5313                                 req->sq_cons - 1, IB_WC_SUCCESS, 0);
5314                 wc += cnt;
5315                 /* if we have extra WC fill it with actual error info */
5316
5317                 if (cnt < num_entries) {
5318                         enum ib_wc_status wc_status;
5319
5320                         switch (req->status) {
5321                         case    RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
5322                                 wc_status = IB_WC_BAD_RESP_ERR;
5323                                 break;
5324                         case    RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
5325                                 wc_status = IB_WC_LOC_LEN_ERR;
5326                                 break;
5327                         case    RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
5328                                 wc_status = IB_WC_LOC_QP_OP_ERR;
5329                                 break;
5330                         case    RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
5331                                 wc_status = IB_WC_LOC_PROT_ERR;
5332                                 break;
5333                         case    RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
5334                                 wc_status = IB_WC_MW_BIND_ERR;
5335                                 break;
5336                         case    RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
5337                                 wc_status = IB_WC_REM_INV_REQ_ERR;
5338                                 break;
5339                         case    RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
5340                                 wc_status = IB_WC_REM_ACCESS_ERR;
5341                                 break;
5342                         case    RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
5343                                 wc_status = IB_WC_REM_OP_ERR;
5344                                 break;
5345                         case    RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
5346                                 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
5347                                 break;
5348                         case    RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
5349                                 wc_status = IB_WC_RETRY_EXC_ERR;
5350                                 break;
5351                         default:
5352                                 wc_status = IB_WC_GENERAL_ERR;
5353                         }
5354
5355                         cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
5356                                         wc_status, 1 /* force use of WC */);
5357                 }
5358         }
5359
5360         QL_DPRINT12(ha, "exit cnt = %d\n", cnt);
5361         return cnt;
5362 }
5363
5364 static void
5365 __process_resp_one(struct qlnxr_dev *dev,
5366         struct qlnxr_qp *qp,
5367         struct qlnxr_cq *cq,
5368         struct ib_wc *wc,
5369         struct rdma_cqe_responder *resp,
5370         u64 wr_id)
5371 {
5372         enum ib_wc_status       wc_status = IB_WC_SUCCESS;
5373 #if __FreeBSD_version < 1102000
5374         u8                      flags;
5375 #endif
5376         qlnx_host_t             *ha = dev->ha;
5377
5378         QL_DPRINT12(ha, "enter qp = %p resp->status = 0x%x\n",
5379                 qp, resp->status);
5380  
5381         wc->opcode = IB_WC_RECV;
5382         wc->wc_flags = 0;
5383
5384         switch (resp->status) {
5385
5386         case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
5387                 wc_status = IB_WC_LOC_ACCESS_ERR;
5388                 break;
5389
5390         case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
5391                 wc_status = IB_WC_LOC_LEN_ERR;
5392                 break;
5393
5394         case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
5395                 wc_status = IB_WC_LOC_QP_OP_ERR;
5396                 break;
5397
5398         case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
5399                 wc_status = IB_WC_LOC_PROT_ERR;
5400                 break;
5401
5402         case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
5403                 wc_status = IB_WC_MW_BIND_ERR;
5404                 break;
5405
5406         case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
5407                 wc_status = IB_WC_REM_INV_RD_REQ_ERR;
5408                 break;
5409
5410         case RDMA_CQE_RESP_STS_OK:
5411
5412 #if __FreeBSD_version >= 1102000
5413                 if (resp->flags & QLNXR_RESP_IMM) {
5414                         wc->ex.imm_data =
5415                                 le32_to_cpu(resp->imm_data_or_inv_r_Key);
5416                         wc->wc_flags |= IB_WC_WITH_IMM;
5417
5418                         if (resp->flags & QLNXR_RESP_RDMA)
5419                                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
5420
5421                         if (resp->flags & QLNXR_RESP_INV) {
5422                                 QL_DPRINT11(ha,
5423                                         "Invalid flags QLNXR_RESP_INV [0x%x]"
5424                                         "qp = %p qp->id = 0x%x cq = %p"
5425                                         " cq->icid = 0x%x\n",
5426                                         resp->flags, qp, qp->id, cq, cq->icid );
5427                         }
5428                 } else if (resp->flags & QLNXR_RESP_INV) {
5429                         wc->ex.imm_data =
5430                                 le32_to_cpu(resp->imm_data_or_inv_r_Key);
5431                         wc->wc_flags |= IB_WC_WITH_INVALIDATE;
5432
5433                         if (resp->flags & QLNXR_RESP_RDMA) {
5434                                 QL_DPRINT11(ha,
5435                                         "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5436                                         "qp = %p qp->id = 0x%x cq = %p"
5437                                         " cq->icid = 0x%x\n",
5438                                         resp->flags, qp, qp->id, cq, cq->icid );
5439                         }
5440                 } else if (resp->flags & QLNXR_RESP_RDMA) {
5441                         QL_DPRINT11(ha, "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5442                                 "qp = %p qp->id = 0x%x cq = %p cq->icid = 0x%x\n",
5443                                 resp->flags, qp, qp->id, cq, cq->icid );
5444                 }
5445 #else
5446                 wc_status = IB_WC_SUCCESS;
5447                 wc->byte_len = le32_to_cpu(resp->length);
5448
5449                 flags = resp->flags & QLNXR_RESP_RDMA_IMM;
5450
5451                 switch (flags) {
5452
5453                 case QLNXR_RESP_RDMA_IMM:
5454                         /* update opcode */
5455                         wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
5456                         /* fall through to set imm data */
5457                 case QLNXR_RESP_IMM:
5458                         wc->ex.imm_data =
5459                                 le32_to_cpu(resp->imm_data_or_inv_r_Key);
5460                         wc->wc_flags |= IB_WC_WITH_IMM;
5461                         break;
5462                 case QLNXR_RESP_RDMA:
5463                         QL_DPRINT11(ha, "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5464                                 "qp = %p qp->id = 0x%x cq = %p cq->icid = 0x%x\n",
5465                                 resp->flags, qp, qp->id, cq, cq->icid );
5466                         break;
5467                 default:
5468                         /* valid configuration, but nothing to do here */
5469                         ;
5470                 }
5471 #endif /* #if __FreeBSD_version >= 1102000 */
5472
5473                 break;
5474         default:
5475                 wc_status = IB_WC_GENERAL_ERR;
5476         }
5477
5478         /* fill WC */
5479         wc->status = wc_status;
5480         wc->vendor_err = 0;
5481         wc->src_qp = qp->id;
5482         wc->qp = &qp->ibqp;
5483         wc->wr_id = wr_id;
5484
5485         QL_DPRINT12(ha, "exit status = 0x%x\n", wc_status);
5486
5487         return;
5488 }
5489
5490 static int
5491 process_resp_one_srq(struct qlnxr_dev *dev,
5492         struct qlnxr_qp *qp,
5493         struct qlnxr_cq *cq,
5494         struct ib_wc *wc,
5495         struct rdma_cqe_responder *resp)
5496 {
5497         struct qlnxr_srq        *srq = qp->srq;
5498         u64                     wr_id;
5499         qlnx_host_t             *ha = dev->ha;
5500
5501         QL_DPRINT12(ha, "enter\n");
5502  
5503         wr_id = HILO_U64(resp->srq_wr_id.hi, resp->srq_wr_id.lo);
5504
5505         if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
5506                 wc->status = IB_WC_WR_FLUSH_ERR;
5507                 wc->vendor_err = 0;
5508                 wc->wr_id = wr_id;
5509                 wc->byte_len = 0;
5510                 wc->src_qp = qp->id;
5511                 wc->qp = &qp->ibqp;
5512                 wc->wr_id = wr_id;
5513         } else {
5514                 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
5515         }
5516
5517         /* The PBL is maintained at WR granularity, so increment the
5518          * WR consumer after consuming a WR.
5519          */
5520         srq->hw_srq.wr_cons_cnt++;
5521
5522         QL_DPRINT12(ha, "exit\n");
5523         return 1;
5524 }
5525
5526 static int
5527 process_resp_one(struct qlnxr_dev *dev,
5528         struct qlnxr_qp *qp,
5529         struct qlnxr_cq *cq,
5530         struct ib_wc *wc,
5531         struct rdma_cqe_responder *resp)
5532 {
5533         qlnx_host_t     *ha = dev->ha;
5534         u64             wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
5535
5536         QL_DPRINT12(ha, "enter\n");
5537  
5538         __process_resp_one(dev, qp, cq, wc, resp, wr_id);
5539
5540         while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
5541                 ecore_chain_consume(&qp->rq.pbl);
5542         qlnxr_inc_sw_cons(&qp->rq);
5543
5544         QL_DPRINT12(ha, "exit\n");
5545         return 1;
5546 }
5547
5548 static int
5549 process_resp_flush(struct qlnxr_qp *qp,
5550         int num_entries,
5551         struct ib_wc *wc,
5552         u16 hw_cons)
5553 {
5554         u16             cnt = 0;
5555         qlnx_host_t     *ha = qp->dev->ha;
5556
5557         QL_DPRINT12(ha, "enter\n");
5558  
5559         while (num_entries && qp->rq.wqe_cons != hw_cons) {
5560                 /* fill WC */
5561                 wc->status = IB_WC_WR_FLUSH_ERR;
5562                 wc->vendor_err = 0;
5563                 wc->wc_flags = 0;
5564                 wc->src_qp = qp->id;
5565                 wc->byte_len = 0;
5566                 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
5567                 wc->qp = &qp->ibqp;
5568                 num_entries--;
5569                 wc++;
5570                 cnt++;
5571                 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
5572                         ecore_chain_consume(&qp->rq.pbl);
5573                 qlnxr_inc_sw_cons(&qp->rq);
5574         }
5575
5576         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5577         return cnt;
5578 }
5579
5580 static void
5581 try_consume_resp_cqe(struct qlnxr_cq *cq,
5582         struct qlnxr_qp *qp,
5583         struct rdma_cqe_responder *resp,
5584         int *update)
5585 {
5586         if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
5587                 consume_cqe(cq);
5588                 *update |= 1;
5589         }
5590 }
5591
5592 static int
5593 qlnxr_poll_cq_resp_srq(struct qlnxr_dev *dev,
5594         struct qlnxr_qp *qp,
5595         struct qlnxr_cq *cq,
5596         int num_entries,
5597         struct ib_wc *wc,
5598         struct rdma_cqe_responder *resp,
5599         int *update)
5600 {
5601         int             cnt;
5602         qlnx_host_t     *ha = dev->ha;
5603
5604         QL_DPRINT12(ha, "enter\n");
5605  
5606         cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
5607         consume_cqe(cq);
5608         *update |= 1;
5609
5610         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5611         return cnt;
5612 }
5613
5614 static int
5615 qlnxr_poll_cq_resp(struct qlnxr_dev *dev,
5616         struct qlnxr_qp *qp,
5617         struct qlnxr_cq *cq,
5618         int num_entries,
5619         struct ib_wc *wc,
5620         struct rdma_cqe_responder *resp,
5621         int *update)
5622 {
5623         int             cnt;
5624         qlnx_host_t     *ha = dev->ha;
5625
5626         QL_DPRINT12(ha, "enter\n");
5627  
5628         if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
5629                 cnt = process_resp_flush(qp, num_entries, wc,
5630                                 resp->rq_cons);
5631                 try_consume_resp_cqe(cq, qp, resp, update);
5632         } else {
5633                 cnt = process_resp_one(dev, qp, cq, wc, resp);
5634                 consume_cqe(cq);
5635                 *update |= 1;
5636         }
5637
5638         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5639         return cnt;
5640 }
5641
5642 static void
5643 try_consume_req_cqe(struct qlnxr_cq *cq, struct qlnxr_qp *qp,
5644         struct rdma_cqe_requester *req, int *update)
5645 {
5646         if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
5647                 consume_cqe(cq);
5648                 *update |= 1;
5649         }
5650 }
5651
5652 static void
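     /*
      * Ring the CQ doorbell: publish the consumer index and aggregation
      * flags with a single 64-bit write at the db_addr offset within the
      * doorbell BAR.
      */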
5653 doorbell_cq(struct qlnxr_dev *dev, struct qlnxr_cq *cq, u32 cons, u8 flags)
5654 {
5655         uint64_t        reg_addr;
5656         qlnx_host_t     *ha = dev->ha;
5657
5658         QL_DPRINT12(ha, "enter\n");
5659  
5660         wmb();
5661         cq->db.data.agg_flags = flags;
5662         cq->db.data.value = cpu_to_le32(cons);
5663
5664         reg_addr = (uint64_t)((uint8_t *)cq->db_addr -
5665                                 (uint8_t *)(ha->cdev.doorbells));
5666
5667         bus_write_8(ha->pci_dbells, reg_addr, cq->db.raw);
5668         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5669
5670         QL_DPRINT12(ha, "exit\n");
5671         return;
5672
5673 //#ifdef __LP64__
5674 //      writeq(cq->db.raw, cq->db_addr);
5675 //#else
5676         /* Note that since the FW allows only 64-bit writes, on 32-bit systems
5677          * the value of db_addr must be low enough. This is currently not
5678          * enforced.
5679          */
5680 //      writel(cq->db.raw & 0xffffffff, cq->db_addr);
5681 //      mmiowb();
5682 //#endif
5683 }
5684
5685
5686 static int
5687 is_valid_cqe(struct qlnxr_cq *cq, union rdma_cqe *cqe)
5688 {
5689         struct rdma_cqe_requester *resp_cqe = &cqe->req;
5690         return (resp_cqe->flags & RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK) ==
5691                         cq->pbl_toggle;
5692 }
5693
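     /*
      * Poll loop: consume CQEs while the toggle bit marks them valid,
      * recover the owning QP from the CQE's qp_handle, dispatch on the CQE
      * type (requester, responder RQ, responder SRQ), and finally ring the
      * CQ doorbell with the last consumed index.
      */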
5694 int
5695 qlnxr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
5696 {
5697         struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
5698         struct qlnxr_dev *dev = get_qlnxr_dev((ibcq->device));
5699         int             done = 0;
5700         union rdma_cqe  *cqe = cq->latest_cqe;
5701         int             update = 0;
5702         u32             old_cons, new_cons;
5703         unsigned long   flags;
5704         qlnx_host_t     *ha = dev->ha;
5705
5706         QL_DPRINT12(ha, "enter\n");
5707
5708         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5709                 return -EINVAL;
5710  
5711         if (cq->destroyed) {
5712                 QL_DPRINT11(ha, "called after destroy for cq %p (icid=%d)\n",
5713                         cq, cq->icid);
5714                 return 0;
5715         }
5716
5717         if (cq->cq_type == QLNXR_CQ_TYPE_GSI)
5718                 return qlnxr_gsi_poll_cq(ibcq, num_entries, wc);
5719
5720         spin_lock_irqsave(&cq->cq_lock, flags);
5721
5722         old_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
5723
5724         while (num_entries && is_valid_cqe(cq, cqe)) {
5725                 int cnt = 0;
5726                 struct qlnxr_qp *qp;
5727                 struct rdma_cqe_requester *resp_cqe;
5728                 enum rdma_cqe_type cqe_type;
5729
5730                 /* prevent speculative reads of any field of CQE */
5731                 rmb();
5732
5733                 resp_cqe = &cqe->req;
5734                 qp = (struct qlnxr_qp *)(uintptr_t)HILO_U64(resp_cqe->qp_handle.hi,
5735                                                 resp_cqe->qp_handle.lo);
5736
5737                 if (!qp) {
5738                         QL_DPRINT11(ha, "qp = NULL\n");
5739                         break;
5740                 }
5741
5742                 wc->qp = &qp->ibqp;
5743
5744                 cqe_type = GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
5745
5746                 switch (cqe_type) {
5747                 case RDMA_CQE_TYPE_REQUESTER:
5748                         cnt = qlnxr_poll_cq_req(dev, qp, cq, num_entries,
5749                                         wc, &cqe->req);
5750                         try_consume_req_cqe(cq, qp, &cqe->req, &update);
5751                         break;
5752                 case RDMA_CQE_TYPE_RESPONDER_RQ:
5753                         cnt = qlnxr_poll_cq_resp(dev, qp, cq, num_entries,
5754                                         wc, &cqe->resp, &update);
5755                         break;
5756                 case RDMA_CQE_TYPE_RESPONDER_SRQ:
5757                         cnt = qlnxr_poll_cq_resp_srq(dev, qp, cq, num_entries,
5758                                         wc, &cqe->resp, &update);
5759                         break;
5760                 case RDMA_CQE_TYPE_INVALID:
5761                 default:
5762                         QL_DPRINT11(ha, "cqe type [0x%x] invalid\n", cqe_type);
5763                         break;
5764                 }
5765                 num_entries -= cnt;
5766                 wc += cnt;
5767                 done += cnt;
5768
5769                 cqe = cq->latest_cqe;
5770         }
5771         new_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
5772
5773         cq->cq_cons += new_cons - old_cons;
5774
5775         if (update) {
5776                 /* doorbell notifies about the latest VALID entry,
5777                  * but the chain already points to the next INVALID one
5778                  */
5779                 doorbell_cq(dev, cq, cq->cq_cons - 1, cq->arm_flags);
5780                 QL_DPRINT12(ha, "cq = %p cons = 0x%x "
5781                         "arm_flags = 0x%x db.icid = 0x%x\n", cq,
5782                         (cq->cq_cons - 1), cq->arm_flags, cq->db.data.icid);
5783         }
5784
5785         spin_unlock_irqrestore(&cq->cq_lock, flags);
5786
5787         QL_DPRINT12(ha, "exit\n");
5788  
5789         return done;
5790 }
5791
5792
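     /*
      * Arm the CQ for the requested notification type by setting the
      * solicited/unsolicited arm flags and ringing the CQ doorbell with
      * the current consumer index.
      */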
5793 int
5794 qlnxr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
5795 {
5796         struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
5797         unsigned long sflags;
5798         struct qlnxr_dev *dev;
5799         qlnx_host_t     *ha;
5800
5801         dev = get_qlnxr_dev((ibcq->device));
5802         ha = dev->ha;
5803
5804         QL_DPRINT12(ha, "enter ibcq = %p flags = 0x%x "
5805                 "cq = %p cons = 0x%x cq_type = 0x%x\n", ibcq,
5806                 flags, cq, cq->cq_cons, cq->cq_type);
5807
5808         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5809                 return -EINVAL;
5810
5811         if (cq->destroyed) {
5812                 QL_DPRINT11(ha, "cq was already destroyed cq = %p icid=%d\n",
5813                         cq, cq->icid);
5814                 return -EINVAL;
5815         }
5816
5817         if (cq->cq_type == QLNXR_CQ_TYPE_GSI) {
5818                 return 0;
5819         }
5820
5821         spin_lock_irqsave(&cq->cq_lock, sflags);
5822
5823         cq->arm_flags = 0;
5824
5825         if (flags & IB_CQ_SOLICITED) {
5826                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
5827         }
5828         if (flags & IB_CQ_NEXT_COMP) {
5829                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
5830         }
5831
5832         doorbell_cq(dev, cq, (cq->cq_cons - 1), cq->arm_flags);
5833
5834         spin_unlock_irqrestore(&cq->cq_lock, sflags);
5835
5836         QL_DPRINT12(ha, "exit ibcq = %p flags = 0x%x\n", ibcq, flags);
5837         return 0;
5838 }
5839
5840
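     /*
      * Allocate a fast-register MR: set up the (possibly two-level) PBL
      * bookkeeping, allocate a TID from ecore, register it with the
      * hardware, and derive lkey/rkey as (itid << 8 | key).
      */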
5841 static struct qlnxr_mr *
5842 __qlnxr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
5843 {
5844         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
5845         struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
5846         struct qlnxr_mr *mr;
5847         int             rc = -ENOMEM;
5848         qlnx_host_t     *ha;
5849
5850         ha = dev->ha;
5851  
5852         QL_DPRINT12(ha, "enter ibpd = %p pd = %p "
5853                 " pd_id = %d max_page_list_len = %d\n",
5854                 ibpd, pd, pd->pd_id, max_page_list_len);
5855
5856         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
5857         if (!mr) {
5858                 QL_DPRINT11(ha, "kzalloc(mr) failed\n");
5859                 return ERR_PTR(rc);
5860         }
5861
5862         mr->dev = dev;
5863         mr->type = QLNXR_MR_FRMR;
5864
5865         rc = qlnxr_init_mr_info(dev, &mr->info, max_page_list_len,
5866                                   1 /* allow dual layer pbl */);
5867         if (rc) {
5868                 QL_DPRINT11(ha, "qlnxr_init_mr_info failed\n");
5869                 goto err0;
5870         }
5871
5872         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
5873         if (rc) {
5874                 QL_DPRINT11(ha, "ecore_rdma_alloc_tid failed\n");
5875                 goto err0;
5876         }
5877
5878         /* index only; the itid is 18 bits long, lkey = itid << 8 | key */
5879         mr->hw_mr.tid_type = ECORE_RDMA_TID_FMR;
5880         mr->hw_mr.key = 0;
5881         mr->hw_mr.pd = pd->pd_id;
5882         mr->hw_mr.local_read = 1;
5883         mr->hw_mr.local_write = 0;
5884         mr->hw_mr.remote_read = 0;
5885         mr->hw_mr.remote_write = 0;
5886         mr->hw_mr.remote_atomic = 0;
5887         mr->hw_mr.mw_bind = false; /* TBD MW BIND */
5888         mr->hw_mr.pbl_ptr = 0; /* Will be supplied during post */
5889         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
5890         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
5891         mr->hw_mr.fbo = 0;
5892         mr->hw_mr.length = 0;
5893         mr->hw_mr.vaddr = 0;
5894         mr->hw_mr.zbva = false; /* TBD figure when this should be true */
5895         mr->hw_mr.phy_mr = true; /* Fast MR - True, Regular Register False */
5896         mr->hw_mr.dma_mr = false;
5897
5898         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
5899         if (rc) {
5900                 QL_DPRINT11(ha, "ecore_rdma_register_tid failed\n");
5901                 goto err1;
5902         }
5903
5904         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
5905         mr->ibmr.rkey = mr->ibmr.lkey;
5906
5907         QL_DPRINT12(ha, "exit mr = %p mr->ibmr.lkey = 0x%x\n",
5908                 mr, mr->ibmr.lkey);
5909
5910         return mr;
5911
5912 err1:
5913         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
5914 err0:
5915         kfree(mr);
5916
5917         QL_DPRINT12(ha, "exit\n");
5918
5919         return ERR_PTR(rc);
5920 }
5921
5922 #if __FreeBSD_version >= 1102000
5923
5924 struct ib_mr *
5925 qlnxr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, u32 max_num_sg)
5926 {
5927         struct qlnxr_dev *dev;
5928         struct qlnxr_mr *mr;
5929         qlnx_host_t     *ha;
5930
5931         dev = get_qlnxr_dev(ibpd->device);
5932         ha = dev->ha;
5933
5934         QL_DPRINT12(ha, "enter\n");
5935
5936         if (mr_type != IB_MR_TYPE_MEM_REG)
5937                 return ERR_PTR(-EINVAL);
5938
5939         mr = __qlnxr_alloc_mr(ibpd, max_num_sg);
5940
5941         if (IS_ERR(mr))
5942                 return ERR_PTR(-EINVAL);
5943
5944         QL_DPRINT12(ha, "exit mr = %p &mr->ibmr = %p\n", mr, &mr->ibmr);
5945
5946         return &mr->ibmr;
5947 }
5948
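     /*
      * Page-list callback used by ib_sg_to_pages() from qlnxr_map_mr_sg():
      * each call writes one page address into the next free PBL entry of
      * the MR being mapped.
      */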
5949 static int
5950 qlnxr_set_page(struct ib_mr *ibmr, u64 addr)
5951 {
5952         struct qlnxr_mr *mr = get_qlnxr_mr(ibmr);
5953         struct qlnxr_pbl *pbl_table;
5954         struct regpair *pbe;
5955         struct qlnxr_dev *dev;
5956         qlnx_host_t     *ha;
5957         u32 pbes_in_page;
5958
5959         dev = mr->dev;
5960         ha = dev->ha;
5961
5962         if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
5963                 QL_DPRINT12(ha, "fails mr->npages %d\n", mr->npages);
5964                 return -ENOMEM;
5965         }
5966
5967         QL_DPRINT12(ha, "mr->npages %d addr = %p enter\n", mr->npages,
5968                 ((void *)addr));
5969
5970         pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
5971         pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
5972         pbe = (struct regpair *)pbl_table->va;
5973         pbe +=  mr->npages % pbes_in_page;
5974         pbe->lo = cpu_to_le32((u32)addr);
5975         pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
5976
5977         mr->npages++;
5978
5979         QL_DPRINT12(ha, "mr->npages %d addr = %p exit \n", mr->npages,
5980                 ((void *)addr));
5981         return 0;
5982 }
5983
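/*
 * Map a scatterlist into the fast-reg MR: reset the PBE count, let
 * qlnx_handle_completed_mrs() process the MR info, then have ib_sg_to_pages()
 * invoke qlnxr_set_page() for every page in the list.
 */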
5984 int
5985 qlnxr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
5986         int sg_nents, unsigned int *sg_offset)
5987 {
5988         int             ret;
5989         struct qlnxr_mr *mr = get_qlnxr_mr(ibmr);
5990         qlnx_host_t     *ha;
5991
5992         if (mr == NULL)
5993                 return (-1);
5994
5995         if (mr->dev == NULL)
5996                 return (-1);
5997
5998         ha = mr->dev->ha;
5999
6000         QL_DPRINT12(ha, "enter\n");
6001
6002         mr->npages = 0;
6003         qlnx_handle_completed_mrs(mr->dev, &mr->info);
6004
6005         ret = ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qlnxr_set_page);
6006
6007         QL_DPRINT12(ha, "exit ret = %d\n", ret);
6008
6009         return (ret);
6010 }
6011
6012 #else
6013
6014 struct ib_mr *
6015 qlnxr_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
6016 {
6017         struct qlnxr_dev *dev;
6018         struct qlnxr_mr *mr;
6019         qlnx_host_t     *ha;
6020         struct ib_mr *ibmr = NULL;
6021
6022         dev = get_qlnxr_dev((ibpd->device));
6023         ha = dev->ha;
6024
6025         QL_DPRINT12(ha, "enter\n");
6026
6027         mr = __qlnxr_alloc_mr(ibpd, max_page_list_len);
6028
6029         if (IS_ERR(mr)) {
6030                 ibmr = ERR_PTR(-EINVAL);
6031         } else {
6032                 ibmr = &mr->ibmr;
6033         }
6034
6035         QL_DPRINT12(ha, "exit %p\n", ibmr);
6036         return (ibmr);
6037 }
6038
6039 void
6040 qlnxr_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
6041 {
6042         struct qlnxr_fast_reg_page_list *frmr_list;
6043
6044         frmr_list = get_qlnxr_frmr_list(page_list);
6045  
6046         free_mr_info(frmr_list->dev, &frmr_list->info);
6047
6048         kfree(frmr_list->ibfrpl.page_list);
6049         kfree(frmr_list);
6050
6051         return;
6052 }
6053
6054 struct ib_fast_reg_page_list *
6055 qlnxr_alloc_frmr_page_list(struct ib_device *ibdev, int page_list_len)
6056 {
6057         struct qlnxr_fast_reg_page_list *frmr_list = NULL;
6058         struct qlnxr_dev                *dev;
6059         int                             size = page_list_len * sizeof(u64);
6060         int                             rc = -ENOMEM;
6061         qlnx_host_t                     *ha;
6062
6063         dev = get_qlnxr_dev(ibdev);
6064         ha = dev->ha;
6065
6066         QL_DPRINT12(ha, "enter\n");
6067
6068         frmr_list = kzalloc(sizeof(*frmr_list), GFP_KERNEL);
6069         if (!frmr_list) {
6070                 QL_DPRINT11(ha, "kzalloc(frmr_list) failed\n");
6071                 goto err;
6072         }
6073
6074         frmr_list->dev = dev;
6075         frmr_list->ibfrpl.page_list = kzalloc(size, GFP_KERNEL);
6076         if (!frmr_list->ibfrpl.page_list) {
6077                 QL_DPRINT11(ha, "frmr_list->ibfrpl.page_list = NULL failed\n");
6078                 goto err0;
6079         }
6080
6081         rc = qlnxr_init_mr_info(dev, &frmr_list->info, page_list_len,
6082                           1 /* allow dual layer pbl */);
6083         if (rc)
6084                 goto err1;
6085
6086         QL_DPRINT12(ha, "exit %p\n", &frmr_list->ibfrpl);
6087
6088         return &frmr_list->ibfrpl;
6089
6090 err1:
6091         kfree(frmr_list->ibfrpl.page_list);
6092 err0:
6093         kfree(frmr_list);
6094 err:
6095         QL_DPRINT12(ha, "exit with error\n");
6096
6097         return ERR_PTR(rc);
6098 }
6099
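/*
 * Verify that every buffer in the physical buffer list has the same non-zero
 * size and return the total length of the region in *total_size.
 */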
6100 static int
6101 qlnxr_validate_phys_buf_list(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
6102         int buf_cnt, uint64_t *total_size)
6103 {
6104         u64 size = 0;
6105
6106         *total_size = 0;
6107
6108         if (!buf_cnt || buf_list == NULL) {
6109                 QL_DPRINT11(ha,
6110                         "failed buf_list = %p buf_cnt = %d\n", buf_list, buf_cnt);
6111                 return (-1);
6112         }
6113
6114         size = buf_list->size;
6115
6116         if (!size) {
6117                 QL_DPRINT11(ha,
6118                         "failed buf_list = %p buf_cnt = %d"
6119                         " buf_list->size = 0\n", buf_list, buf_cnt);
6120                 return (-1);
6121         }
6122
6123         while (buf_cnt) {
6124
6125                 *total_size += buf_list->size;
6126
6127                 if (buf_list->size != size) {
6128                         QL_DPRINT11(ha,
6129                                 "failed buf_list = %p buf_cnt = %d"
6130                                 " all buffers should have same size\n",
6131                                 buf_list, buf_cnt);
6132                         return (-1);
6133                 }
6134
6135                 buf_list++;
6136                 buf_cnt--;
6137         }
6138         return (0);
6139 }
6140
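/*
 * Count how many PAGE_SIZE pages are needed to cover the entire physical
 * buffer list.
 */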
6141 static size_t
6142 qlnxr_get_num_pages(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
6143         int buf_cnt)
6144 {
6145         int     i;
6146         size_t  num_pages = 0;
6147         u64     size;
6148
6149         for (i = 0; i < buf_cnt; i++) {
6150
6151                 size = 0;
6152                 while (size < buf_list->size) {
6153                         size += PAGE_SIZE;
6154                         num_pages++;
6155                 }
6156                 buf_list++;
6157         }
6158         return (num_pages);
6159 }
6160
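/*
 * Write the page addresses of the physical buffer list into the PBL(s),
 * honoring a two-layered PBL layout and spilling into the next PBL page
 * whenever the current one fills up.
 */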
6161 static void
6162 qlnxr_populate_phys_mem_pbls(struct qlnxr_dev *dev,
6163         struct ib_phys_buf *buf_list, int buf_cnt,
6164         struct qlnxr_pbl *pbl, struct qlnxr_pbl_info *pbl_info)
6165 {
6166         struct regpair          *pbe;
6167         struct qlnxr_pbl        *pbl_tbl;
6168         int                     pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
6169         qlnx_host_t             *ha;
6170         int                     i;
6171         u64                     pbe_addr;
6172
6173         ha = dev->ha;
6174
6175         QL_DPRINT12(ha, "enter\n");
6176
6177         if (!pbl_info) {
6178                 QL_DPRINT11(ha, "PBL_INFO not initialized\n");
6179                 return;
6180         }
6181
6182         if (!pbl_info->num_pbes) {
6183                 QL_DPRINT11(ha, "pbl_info->num_pbes == 0\n");
6184                 return;
6185         }
6186
6187         /* If we have a two-layered pbl, the first pbl points to the rest
6188          * of the pbls, and the first data entry lies in the second pbl of the table.
6189          */
6190         if (pbl_info->two_layered)
6191                 pbl_tbl = &pbl[1];
6192         else
6193                 pbl_tbl = pbl;
6194
6195         pbe = (struct regpair *)pbl_tbl->va;
6196         if (!pbe) {
6197                 QL_DPRINT12(ha, "pbe is NULL\n");
6198                 return;
6199         }
6200
6201         pbe_cnt = 0;
6202
6203         for (i = 0; i < buf_cnt; i++) {
6204
6205                 pages = buf_list->size >> PAGE_SHIFT;
6206
6207                 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
6208                         /* store the page address in pbe */
6209
6210                         pbe_addr = buf_list->addr + (PAGE_SIZE * pg_cnt);
6211
6212                         pbe->lo = cpu_to_le32((u32)pbe_addr);
6213                         pbe->hi = cpu_to_le32(((u32)(pbe_addr >> 32)));
6214
6215                         QL_DPRINT12(ha, "Populate pbl table:"
6216                                 " pbe->addr=0x%x:0x%x "
6217                                 " pbe_cnt = %d total_num_pbes=%d"
6218                                 " pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
6219                                 total_num_pbes, pbe);
6220
6221                         pbe_cnt ++;
6222                         total_num_pbes ++;
6223                         pbe++;
6224
6225                         if (total_num_pbes == pbl_info->num_pbes)
6226                                 return;
6227
6228                         /* if the current pbl is full of pbes,
6229                          * move on to the next pbl.  */
6230
6231                         if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
6232                                 pbl_tbl++;
6233                                 pbe = (struct regpair *)pbl_tbl->va;
6234                                 pbe_cnt = 0;
6235                         }
6236                 }
6237                 buf_list++;
6238         }
6239         QL_DPRINT12(ha, "exit\n");
6240         return;
6241 }
6242
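/*
 * Register a kernel MR over a physical buffer list (pre-1102000 interface):
 * validate the buffers, build the PBLs, then allocate a TID and register it
 * with the requested access flags.
 */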
6243 struct ib_mr *
6244 qlnxr_reg_kernel_mr(struct ib_pd *ibpd,
6245         struct ib_phys_buf *buf_list,
6246         int buf_cnt, int acc, u64 *iova_start)
6247 {
6248         int             rc = -ENOMEM;
6249         struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
6250         struct qlnxr_mr *mr;
6251         struct qlnxr_pd *pd;
6252         qlnx_host_t     *ha;
6253         size_t          num_pages = 0;
6254         uint64_t        length;
6255
6256         ha = dev->ha;
6257
6258         QL_DPRINT12(ha, "enter\n");
6259
6260         pd = get_qlnxr_pd(ibpd);
6261
6262         QL_DPRINT12(ha, "pd = %d buf_list = %p, buf_cnt = %d,"
6263                 " iova_start = %p, acc = %d\n",
6264                 pd->pd_id, buf_list, buf_cnt, iova_start, acc);
6265
6266         //if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
6267         //      QL_DPRINT11(ha, "(acc & IB_ACCESS_REMOTE_WRITE &&"
6268         //              " !(acc & IB_ACCESS_LOCAL_WRITE))\n");
6269         //      return ERR_PTR(-EINVAL);
6270         //}
6271
6272         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
6273         if (!mr) {
6274                 QL_DPRINT11(ha, "kzalloc(mr) failed\n");
6275                 return ERR_PTR(rc);
6276         }
6277
6278         mr->type = QLNXR_MR_KERNEL;
6279         mr->iova_start = iova_start;
6280
6281         rc = qlnxr_validate_phys_buf_list(ha, buf_list, buf_cnt, &length);
6282         if (rc)
6283                 goto err0;
6284
6285         num_pages = qlnxr_get_num_pages(ha, buf_list, buf_cnt);
6286         if (!num_pages)
6287                 goto err0;
6288
6289         rc = qlnxr_init_mr_info(dev, &mr->info, num_pages, 1);
6290         if (rc) {
6291                 QL_DPRINT11(ha,
6292                         "qlnxr_init_mr_info failed [%d]\n", rc);
6293                 goto err1;
6294         }
6295
6296         qlnxr_populate_phys_mem_pbls(dev, buf_list, buf_cnt, mr->info.pbl_table,
6297                    &mr->info.pbl_info);
6298
6299         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
6300
6301         if (rc) {
6302                 QL_DPRINT11(ha, "roce alloc tid returned an error %d\n", rc);
6303                 goto err1;
6304         }
6305
6306         /* index only, 18 bits long, lkey = itid << 8 | key */
6307         mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
6308         mr->hw_mr.key = 0;
6309         mr->hw_mr.pd = pd->pd_id;
6310         mr->hw_mr.local_read = 1;
6311         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
6312         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
6313         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
6314         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
6315         mr->hw_mr.mw_bind = false; /* TBD MW BIND */
6316         mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
6317         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
6318         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
6319         mr->hw_mr.page_size_log = ilog2(PAGE_SIZE); /* for the MR pages */
6320
6321         mr->hw_mr.fbo = 0;
6322
6323         mr->hw_mr.length = length;
6324         mr->hw_mr.vaddr = (uint64_t)iova_start;
6325         mr->hw_mr.zbva = false; /* TBD: figure out when this should be true */
6326         mr->hw_mr.phy_mr = false; /* Fast MR: true; regular registration: false */
6327         mr->hw_mr.dma_mr = false;
6328
6329         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
6330         if (rc) {
6331                 QL_DPRINT11(ha, "roce register tid returned an error %d\n", rc);
6332                 goto err2;
6333         }
6334
6335         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
6336         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
6337                 mr->hw_mr.remote_atomic)
6338                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
6339
6340         QL_DPRINT12(ha, "lkey: %x\n", mr->ibmr.lkey);
6341
6342         return (&mr->ibmr);
6343
6344 err2:
6345         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
6346 err1:
6347         qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
6348 err0:
6349         kfree(mr);
6350
6351         QL_DPRINT12(ha, "exit [%d]\n", rc);
6352         return (ERR_PTR(rc));
6353 }
6354
6355 #endif /* #if __FreeBSD_version >= 1102000 */
6356
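/*
 * Address handle support is minimal: create simply caches the AH attributes
 * and destroy frees them; query and modify are not supported.
 */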
6357 struct ib_ah *
6358 #if __FreeBSD_version >= 1102000
6359 qlnxr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
6360         struct ib_udata *udata)
6361 #else
6362 qlnxr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
6363 #endif /* #if __FreeBSD_version >= 1102000 */
6364 {
6365         struct qlnxr_dev *dev;
6366         qlnx_host_t     *ha;
6367         struct qlnxr_ah *ah;
6368
6369         dev = get_qlnxr_dev((ibpd->device));
6370         ha = dev->ha;
6371
6372         QL_DPRINT12(ha, "in create_ah\n");
6373
6374         ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
6375         if (!ah) {
6376                 QL_DPRINT12(ha, "no address handle can be allocated\n");
6377                 return ERR_PTR(-ENOMEM);
6378         }
6379         
6380         ah->attr = *attr;       
6381  
6382         return &ah->ibah;
6383 }
6384
6385 int
6386 qlnxr_destroy_ah(struct ib_ah *ibah)
6387 {
6388         struct qlnxr_dev *dev;
6389         qlnx_host_t     *ha;
6390         struct qlnxr_ah *ah = get_qlnxr_ah(ibah);
6391         
6392         dev = get_qlnxr_dev((ibah->device));
6393         ha = dev->ha;
6394
6395         QL_DPRINT12(ha, "in destroy_ah\n");
6396
6397         kfree(ah);
6398         return 0;
6399 }
6400
6401 int
6402 qlnxr_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
6403 {
6404         struct qlnxr_dev *dev;
6405         qlnx_host_t     *ha;
6406
6407         dev = get_qlnxr_dev((ibah->device));
6408         ha = dev->ha;
6409         QL_DPRINT12(ha, "Query AH not supported\n");
6410         return -EINVAL;
6411 }
6412
6413 int
6414 qlnxr_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
6415 {
6416         struct qlnxr_dev *dev;
6417         qlnx_host_t     *ha;
6418
6419         dev = get_qlnxr_dev((ibah->device));
6420         ha = dev->ha;
6421         QL_DPRINT12(ha, "Modify AH not supported\n");
6422         return -ENOSYS;
6423 }
6424
6425 #if __FreeBSD_version >= 1102000
6426 int
6427 qlnxr_process_mad(struct ib_device *ibdev,
6428                 int process_mad_flags,
6429                 u8 port_num,
6430                 const struct ib_wc *in_wc,
6431                 const struct ib_grh *in_grh,
6432                 const struct ib_mad_hdr *mad_hdr,
6433                 size_t in_mad_size,
6434                 struct ib_mad_hdr *out_mad,
6435                 size_t *out_mad_size,
6436                 u16 *out_mad_pkey_index)
6437
6438 #else
6439
6440 int
6441 qlnxr_process_mad(struct ib_device *ibdev,
6442                         int process_mad_flags,
6443                         u8 port_num,
6444                         struct ib_wc *in_wc,
6445                         struct ib_grh *in_grh,
6446                         struct ib_mad *in_mad,
6447                         struct ib_mad *out_mad)
6448
6449 #endif /* #if __FreeBSD_version >= 1102000 */
6450 {
6451         struct qlnxr_dev *dev;
6452         qlnx_host_t     *ha;
6453
6454         dev = get_qlnxr_dev(ibdev);
6455         ha = dev->ha;
6456         QL_DPRINT12(ha, "process mad not supported\n");
6457
6458         return -ENOSYS;
6459 //      QL_DPRINT12(ha, "qlnxr_process_mad in_mad %x %x %x %x %x %x %x %x\n",
6460 //               in_mad->mad_hdr.attr_id, in_mad->mad_hdr.base_version,
6461 //               in_mad->mad_hdr.attr_mod, in_mad->mad_hdr.class_specific,
6462 //               in_mad->mad_hdr.class_version, in_mad->mad_hdr.method,
6463 //               in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.status);
6464
6465 //      return IB_MAD_RESULT_SUCCESS;   
6466 }
6467
6468
6469 #if __FreeBSD_version >= 1102000
6470 int
6471 qlnxr_get_port_immutable(struct ib_device *ibdev, u8 port_num,
6472         struct ib_port_immutable *immutable)
6473 {
6474         struct qlnxr_dev        *dev;
6475         qlnx_host_t             *ha;
6476         struct ib_port_attr     attr;
6477         int                     err;
6478
6479         dev = get_qlnxr_dev(ibdev);
6480         ha = dev->ha;
6481
6482         QL_DPRINT12(ha, "enter\n");
6483
6484         err = qlnxr_query_port(ibdev, port_num, &attr);
6485         if (err)
6486                 return err;
6487
6488         if (QLNX_IS_IWARP(dev)) {
6489                 immutable->pkey_tbl_len = 1;
6490                 immutable->gid_tbl_len = 1;
6491                 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
6492                 immutable->max_mad_size = 0;
6493         } else {
6494                 immutable->pkey_tbl_len = attr.pkey_tbl_len;
6495                 immutable->gid_tbl_len = attr.gid_tbl_len;
6496                 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
6497                 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
6498         }
6499
6500         QL_DPRINT12(ha, "exit\n");
6501         return 0;
6502 }
6503 #endif /* #if __FreeBSD_version >= 1102000 */
6504
6505
6506 /***** iWARP related functions *************/
6507
6508
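/*
 * Passive side: an MPA connect request arrived on a listener. Allocate an
 * endpoint for it (IPv4 only) and deliver IW_CM_EVENT_CONNECT_REQUEST to the
 * listener's iw_cm event handler.
 */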
6509 static void
6510 qlnxr_iw_mpa_request(void *context,
6511         struct ecore_iwarp_cm_event_params *params)
6512 {
6513         struct qlnxr_iw_listener *listener = (struct qlnxr_iw_listener *)context;
6514         struct qlnxr_dev *dev = listener->dev;
6515         struct qlnxr_iw_ep *ep;
6516         struct iw_cm_event event;
6517         struct sockaddr_in *laddr;
6518         struct sockaddr_in *raddr;
6519         qlnx_host_t     *ha;
6520
6521         ha = dev->ha;
6522
6523         QL_DPRINT12(ha, "enter\n");
6524
6525         if (params->cm_info->ip_version != ECORE_TCP_IPV4) {
6526                 QL_DPRINT11(ha, "only IPv4 supported [0x%x]\n",
6527                         params->cm_info->ip_version);
6528                 return;
6529         }
6530  
6531         ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
6532
6533         if (!ep) {
6534                 QL_DPRINT11(ha, "kzalloc(ep) failed\n");
6535                 return;
6536         }
6537
6538         ep->dev = dev;
6539         ep->ecore_context = params->ep_context;
6540
6541         memset(&event, 0, sizeof(event));
6542
6543         event.event = IW_CM_EVENT_CONNECT_REQUEST;
6544         event.status = params->status;
6545
6546         laddr = (struct sockaddr_in *)&event.local_addr;
6547         raddr = (struct sockaddr_in *)&event.remote_addr;
6548
6549         laddr->sin_family = AF_INET;
6550         raddr->sin_family = AF_INET;
6551
6552         laddr->sin_port = htons(params->cm_info->local_port);
6553         raddr->sin_port = htons(params->cm_info->remote_port);
6554
6555         laddr->sin_addr.s_addr = htonl(params->cm_info->local_ip[0]);
6556         raddr->sin_addr.s_addr = htonl(params->cm_info->remote_ip[0]);
6557
6558         event.provider_data = (void *)ep;
6559         event.private_data = (void *)params->cm_info->private_data;
6560         event.private_data_len = (u8)params->cm_info->private_data_len;
6561
6562 #if __FreeBSD_version >= 1100000
6563         event.ord = params->cm_info->ord;
6564         event.ird = params->cm_info->ird;
6565 #endif /* #if __FreeBSD_version >= 1100000 */
6566
6567         listener->cm_id->event_handler(listener->cm_id, &event);
6568
6569         QL_DPRINT12(ha, "exit\n");
6570
6571         return;
6572 }
6573
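/*
 * Translate an ecore CM event into an iw_cm_event of the given type and pass
 * it to the endpoint's cm_id event handler, copying over the private data and
 * (where available) the ird/ord values.
 */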
6574 static void
6575 qlnxr_iw_issue_event(void *context,
6576          struct ecore_iwarp_cm_event_params *params,
6577          enum iw_cm_event_type event_type,
6578          char *str)
6579 {
6580         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6581         struct qlnxr_dev *dev = ep->dev;
6582         struct iw_cm_event event;
6583         qlnx_host_t     *ha;
6584
6585         ha = dev->ha;
6586
6587         QL_DPRINT12(ha, "enter\n");
6588
6589         memset(&event, 0, sizeof(event));
6590         event.status = params->status;
6591         event.event = event_type;
6592
6593         if (params->cm_info != NULL) {
6594 #if __FreeBSD_version >= 1100000
6595                 event.ird = params->cm_info->ird;
6596                 event.ord = params->cm_info->ord;
6597                 QL_DPRINT12(ha, "ord=[%d] \n", event.ord);
6598                 QL_DPRINT12(ha, "ird=[%d] \n", event.ird);
6599 #endif /* #if __FreeBSD_version >= 1100000 */
6600
6601                 event.private_data_len = params->cm_info->private_data_len;
6602                 event.private_data = (void *)params->cm_info->private_data;
6603                 QL_DPRINT12(ha, "private_data_len=[%d] \n",
6604                         event.private_data_len);
6605         }
6606
6607         QL_DPRINT12(ha, "event=[%d] %s\n", event.event, str);
6608         QL_DPRINT12(ha, "status=[%d] \n", event.status);
6609         
6610         if (ep) {
6611                 if (ep->cm_id)
6612                         ep->cm_id->event_handler(ep->cm_id, &event);
6613                 else
6614                         QL_DPRINT11(ha, "ep->cm_id == NULL \n");
6615         } else {
6616                 QL_DPRINT11(ha, "ep == NULL \n");
6617         }
6618
6619         QL_DPRINT12(ha, "exit\n");
6620
6621         return;
6622 }
6623
6624 static void
6625 qlnxr_iw_close_event(void *context,
6626          struct ecore_iwarp_cm_event_params *params)
6627 {
6628         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6629         struct qlnxr_dev *dev = ep->dev;
6630         qlnx_host_t     *ha;
6631
6632         ha = dev->ha;
6633
6634         QL_DPRINT12(ha, "enter\n");
6635  
6636         if (ep->cm_id) {
6637                 qlnxr_iw_issue_event(context,
6638                                     params,
6639                                     IW_CM_EVENT_CLOSE,
6640                                     "IW_CM_EVENT_EVENT_CLOSE");
6641                 ep->cm_id->rem_ref(ep->cm_id);
6642                 ep->cm_id = NULL;
6643         }
6644
6645         QL_DPRINT12(ha, "exit\n");
6646
6647         return;
6648 }
6649
6650 #if __FreeBSD_version >= 1102000
6651
6652 static void
6653 qlnxr_iw_passive_complete(void *context,
6654         struct ecore_iwarp_cm_event_params *params)
6655 {
6656         struct qlnxr_iw_ep      *ep = (struct qlnxr_iw_ep *)context;
6657         struct qlnxr_dev        *dev = ep->dev;
6658         qlnx_host_t             *ha;
6659
6660         ha = dev->ha;
6661
6662         /* We will only reach the following state if MPA_REJECT was called on
6663          * passive. In this case there will be no associated QP.
6664          */
6665         if ((params->status == -ECONNREFUSED) && (ep->qp == NULL)) {
6666                 QL_DPRINT11(ha, "PASSIVE connection refused releasing ep...\n");
6667                 kfree(ep);
6668                 return;
6669         }
6670
6671         /* We always issue an established event; however, OFED does not look
6672          * at the event code for established, so if there was a failure we
6673          * follow up with a close event.
6674          */
6675         qlnxr_iw_issue_event(context,
6676                 params,
6677                 IW_CM_EVENT_ESTABLISHED,
6678                 "IW_CM_EVENT_ESTABLISHED");
6679
6680         if (params->status < 0) {
6681                 qlnxr_iw_close_event(context, params);
6682         }
6683
6684         return;
6685 }
6686
6687 struct qlnxr_discon_work {
6688         struct work_struct work;
6689         struct qlnxr_iw_ep *ep;
6690         enum ecore_iwarp_event_type event;
6691         int status;
6692 };
6693
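/*
 * Deferred disconnect handling: deliver IW_CM_EVENT_DISCONNECT to the
 * endpoint and move the QP to SQD for a graceful disconnect or to ERR for an
 * abortive one, then drop the QP reference taken when the work was queued.
 */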
6694 static void
6695 qlnxr_iw_disconnect_worker(struct work_struct *work)
6696 {
6697         struct qlnxr_discon_work *dwork =
6698                 container_of(work, struct qlnxr_discon_work, work);
6699         struct ecore_rdma_modify_qp_in_params qp_params = { 0 };
6700         struct qlnxr_iw_ep *ep = dwork->ep;
6701         struct qlnxr_dev *dev = ep->dev;
6702         struct qlnxr_qp *qp = ep->qp;
6703         struct iw_cm_event event;
6704
6705         if (qp->destroyed) {
6706                 kfree(dwork);
6707                 qlnxr_iw_qp_rem_ref(&qp->ibqp);
6708                 return;
6709         }
6710
6711         memset(&event, 0, sizeof(event));
6712         event.status = dwork->status;
6713         event.event = IW_CM_EVENT_DISCONNECT;
6714
6715         /* A zero status means a graceful disconnect was requested; modifying
6716          * the QP to SQD triggers the graceful disconnect, otherwise a reset is sent.
6717          */
6718         if (dwork->status)
6719                 qp_params.new_state = ECORE_ROCE_QP_STATE_ERR;
6720         else
6721                 qp_params.new_state = ECORE_ROCE_QP_STATE_SQD;
6722
6723         kfree(dwork);
6724
6725         if (ep->cm_id)
6726                 ep->cm_id->event_handler(ep->cm_id, &event);
6727
6728         SET_FIELD(qp_params.modify_flags,
6729                   ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
6730
6731         ecore_rdma_modify_qp(dev->rdma_ctx, qp->ecore_qp, &qp_params);
6732
6733         qlnxr_iw_qp_rem_ref(&qp->ibqp);
6734
6735         return;
6736 }
6737
6738 void
6739 qlnxr_iw_disconnect_event(void *context,
6740         struct ecore_iwarp_cm_event_params *params)
6741 {
6742         struct qlnxr_discon_work *work;
6743         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6744         struct qlnxr_dev *dev = ep->dev;
6745         struct qlnxr_qp *qp = ep->qp;
6746
6747         work = kzalloc(sizeof(*work), GFP_ATOMIC);
6748         if (!work)
6749                 return;
6750
6751         qlnxr_iw_qp_add_ref(&qp->ibqp);
6752         work->ep = ep;
6753         work->event = params->event;
6754         work->status = params->status;
6755
6756         INIT_WORK(&work->work, qlnxr_iw_disconnect_worker);
6757         queue_work(dev->iwarp_wq, &work->work);
6758
6759         return;
6760 }
6761
6762 #endif /* #if __FreeBSD_version >= 1102000 */
6763
6764 static int
6765 qlnxr_iw_mpa_reply(void *context,
6766         struct ecore_iwarp_cm_event_params *params)
6767 {
6768         struct qlnxr_iw_ep      *ep = (struct qlnxr_iw_ep *)context;
6769         struct qlnxr_dev        *dev = ep->dev;
6770         struct ecore_iwarp_send_rtr_in rtr_in;
6771         int                     rc;
6772         qlnx_host_t             *ha;
6773
6774         ha = dev->ha;
6775
6776         QL_DPRINT12(ha, "enter\n");
6777
6778         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6779                 return -EINVAL;
6780
6781         bzero(&rtr_in, sizeof(struct ecore_iwarp_send_rtr_in));
6782         rtr_in.ep_context = params->ep_context;
6783
6784         rc = ecore_iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
6785
6786         QL_DPRINT12(ha, "exit rc = %d\n", rc);
6787         return rc;
6788 }
6789
6790
6791 void
6792 qlnxr_iw_qp_event(void *context,
6793         struct ecore_iwarp_cm_event_params *params,
6794         enum ib_event_type ib_event,
6795         char *str)
6796 {
6797         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6798         struct qlnxr_dev *dev = ep->dev;
6799         struct ib_qp *ibqp = &(ep->qp->ibqp);
6800         struct ib_event event;
6801         qlnx_host_t     *ha;
6802
6803         ha = dev->ha;
6804
6805         QL_DPRINT12(ha,
6806                 "[context, event, event_handler] = [%p, 0x%x, %s, %p] enter\n",
6807                 context, params->event, str, ibqp->event_handler);
6808
6809         if (ibqp->event_handler) {
6810                 event.event = ib_event;
6811                 event.device = ibqp->device;
6812                 event.element.qp = ibqp;
6813                 ibqp->event_handler(&event, ibqp->qp_context);
6814         }
6815
6816         return;
6817 }
6818
6819 int
6820 qlnxr_iw_event_handler(void *context,
6821         struct ecore_iwarp_cm_event_params *params)
6822 {
6823         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6824         struct qlnxr_dev *dev = ep->dev;
6825         qlnx_host_t     *ha;
6826
6827         ha = dev->ha;
6828
6829         QL_DPRINT12(ha, "[context, event] = [%p, 0x%x] "
6830                 "enter\n", context, params->event);
6831  
6832         switch (params->event) {
6833
6834         /* Passive side request received */
6835         case ECORE_IWARP_EVENT_MPA_REQUEST:
6836                 qlnxr_iw_mpa_request(context, params);
6837                 break;
6838
6839         case ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY:
6840                 qlnxr_iw_mpa_reply(context, params);
6841                 break;
6842
6843         /* Passive side established (ack on MPA response) */
6844         case ECORE_IWARP_EVENT_PASSIVE_COMPLETE:
6845
6846 #if __FreeBSD_version >= 1102000
6847
6848                 ep->during_connect = 0;
6849                 qlnxr_iw_passive_complete(context, params);
6850
6851 #else
6852                 qlnxr_iw_issue_event(context,
6853                                     params,
6854                                     IW_CM_EVENT_ESTABLISHED,
6855                                     "IW_CM_EVENT_ESTABLISHED");
6856 #endif /* #if __FreeBSD_version >= 1102000 */
6857                 break;
6858
6859         /* Active side reply received */
6860         case ECORE_IWARP_EVENT_ACTIVE_COMPLETE:
6861                 ep->during_connect = 0;
6862                 qlnxr_iw_issue_event(context,
6863                                     params,
6864                                     IW_CM_EVENT_CONNECT_REPLY,
6865                                     "IW_CM_EVENT_CONNECT_REPLY");
6866                 if (params->status < 0) {
6867                         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6868
6869                         ep->cm_id->rem_ref(ep->cm_id);
6870                         ep->cm_id = NULL;
6871                 }
6872                 break;
6873
6874         case ECORE_IWARP_EVENT_DISCONNECT:
6875
6876 #if __FreeBSD_version >= 1102000
6877                 qlnxr_iw_disconnect_event(context, params);
6878 #else
6879                 qlnxr_iw_issue_event(context,
6880                                     params,
6881                                     IW_CM_EVENT_DISCONNECT,
6882                                     "IW_CM_EVENT_DISCONNECT");
6883                 qlnxr_iw_close_event(context, params);
6884 #endif /* #if __FreeBSD_version >= 1102000 */
6885                 break;
6886
6887         case ECORE_IWARP_EVENT_CLOSE:
6888                 ep->during_connect = 0;
6889                 qlnxr_iw_close_event(context, params);
6890                 break;
6891
6892         case ECORE_IWARP_EVENT_RQ_EMPTY:
6893                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6894                                  "IWARP_EVENT_RQ_EMPTY");
6895                 break;
6896
6897         case ECORE_IWARP_EVENT_IRQ_FULL:
6898                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6899                                  "IWARP_EVENT_IRQ_FULL");
6900                 break;
6901
6902         case ECORE_IWARP_EVENT_LLP_TIMEOUT:
6903                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6904                                  "IWARP_EVENT_LLP_TIMEOUT");
6905                 break;
6906
6907         case ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
6908                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
6909                                  "IWARP_EVENT_REMOTE_PROTECTION_ERROR");
6910                 break;
6911
6912         case ECORE_IWARP_EVENT_CQ_OVERFLOW:
6913                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6914                                  "QED_IWARP_EVENT_CQ_OVERFLOW");
6915                 break;
6916
6917         case ECORE_IWARP_EVENT_QP_CATASTROPHIC:
6918                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6919                                  "QED_IWARP_EVENT_QP_CATASTROPHIC");
6920                 break;
6921
6922         case ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR:
6923                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
6924                                  "IWARP_EVENT_LOCAL_ACCESS_ERROR");
6925                 break;
6926
6927         case ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR:
6928                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6929                                  "IWARP_EVENT_REMOTE_OPERATION_ERROR");
6930                 break;
6931
6932         case ECORE_IWARP_EVENT_TERMINATE_RECEIVED:
6933                 QL_DPRINT12(ha, "Got terminate message"
6934                         " ECORE_IWARP_EVENT_TERMINATE_RECEIVED\n");
6935                 break;
6936
6937         default:
6938                 QL_DPRINT12(ha,
6939                         "Unknown event [0x%x] received \n", params->event);
6940                 break;
6941         }
6942
6943         QL_DPRINT12(ha, "[context, event] = [%p, 0x%x] "
6944                 "exit\n", context, params->event);
6945         return 0;
6946 }
6947
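/*
 * Resolve the destination MAC address for an IPv4 peer via arpresolve() on
 * the port's ifnet.
 */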
6948 static int
6949 qlnxr_addr4_resolve(struct qlnxr_dev *dev,
6950                               struct sockaddr_in *src_in,
6951                               struct sockaddr_in *dst_in,
6952                               u8 *dst_mac)
6953 {
6954         int rc;
6955
6956 #if __FreeBSD_version >= 1100000
6957         rc = arpresolve(dev->ha->ifp, 0, NULL, (struct sockaddr *)dst_in,
6958                         dst_mac, NULL, NULL);
6959 #else
6960         struct llentry *lle;
6961
6962         rc = arpresolve(dev->ha->ifp, NULL, NULL, (struct sockaddr *)dst_in,
6963                         dst_mac, &lle);
6964 #endif
6965
6966         QL_DPRINT12(dev->ha, "rc = %d "
6967                 "sa_len = 0x%x sa_family = 0x%x IP Address = %d.%d.%d.%d "
6968                 "Dest MAC %02x:%02x:%02x:%02x:%02x:%02x\n", rc,
6969                 dst_in->sin_len, dst_in->sin_family,
6970                 NIPQUAD((dst_in->sin_addr.s_addr)),
6971                 dst_mac[0], dst_mac[1], dst_mac[2],
6972                 dst_mac[3], dst_mac[4], dst_mac[5]);
6973
6974         return rc;
6975 }
6976
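/*
 * Active side connect: look up the QP, resolve the peer MAC address, fill an
 * ecore_iwarp_connect_in structure from the cm_id/conn_param and hand the
 * connection off to ecore_iwarp_connect().
 */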
6977 int
6978 qlnxr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
6979 {
6980         struct qlnxr_dev *dev;
6981         struct ecore_iwarp_connect_out out_params;
6982         struct ecore_iwarp_connect_in in_params;
6983         struct qlnxr_iw_ep *ep;
6984         struct qlnxr_qp *qp;
6985         struct sockaddr_in *laddr;
6986         struct sockaddr_in *raddr;
6987         int rc = 0;
6988         qlnx_host_t     *ha;
6989
6990         dev = get_qlnxr_dev((cm_id->device));
6991         ha = dev->ha;
6992
6993         QL_DPRINT12(ha, "[cm_id, conn_param] = [%p, %p] "
6994                 "enter \n", cm_id, conn_param);
6995
6996         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6997                 return -EINVAL;
6998
6999         qp = idr_find(&dev->qpidr, conn_param->qpn);
7000
7001         laddr = (struct sockaddr_in *)&cm_id->local_addr;
7002         raddr = (struct sockaddr_in *)&cm_id->remote_addr;
7003
7004         QL_DPRINT12(ha,
7005                 "local = [%d.%d.%d.%d, %d] remote = [%d.%d.%d.%d, %d]\n",
7006                 NIPQUAD((laddr->sin_addr.s_addr)), laddr->sin_port,
7007                 NIPQUAD((raddr->sin_addr.s_addr)), raddr->sin_port);
7008
7009         ep = kzalloc(sizeof(*ep), GFP_KERNEL);
7010         if (!ep) {
7011                 QL_DPRINT11(ha, "struct qlnxr_iw_ep "
7012                         "alloc memory failed\n");
7013                 return -ENOMEM;
7014         }
7015
7016         ep->dev = dev;
7017         ep->qp = qp;
7018         cm_id->add_ref(cm_id);
7019         ep->cm_id = cm_id;
7020
7021         memset(&in_params, 0, sizeof (struct ecore_iwarp_connect_in));
7022         memset(&out_params, 0, sizeof (struct ecore_iwarp_connect_out));
7023
7024         in_params.event_cb = qlnxr_iw_event_handler;
7025         in_params.cb_context = ep;
7026
7027         in_params.cm_info.ip_version = ECORE_TCP_IPV4;
7028
7029         in_params.cm_info.remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
7030         in_params.cm_info.local_ip[0] = ntohl(laddr->sin_addr.s_addr);
7031         in_params.cm_info.remote_port = ntohs(raddr->sin_port);
7032         in_params.cm_info.local_port = ntohs(laddr->sin_port);
7033         in_params.cm_info.vlan = 0;
7034         in_params.mss = dev->ha->ifp->if_mtu - 40;
7035
7036         QL_DPRINT12(ha, "remote_ip = [%d.%d.%d.%d] "
7037                 "local_ip = [%d.%d.%d.%d] remote_port = %d local_port = %d "
7038                 "vlan = %d\n",
7039                 NIPQUAD((in_params.cm_info.remote_ip[0])),
7040                 NIPQUAD((in_params.cm_info.local_ip[0])),
7041                 in_params.cm_info.remote_port, in_params.cm_info.local_port,
7042                 in_params.cm_info.vlan);
7043
7044         rc = qlnxr_addr4_resolve(dev, laddr, raddr, (u8 *)in_params.remote_mac_addr);
7045
7046         if (rc) {
7047                 QL_DPRINT11(ha, "qlnxr_addr4_resolve failed\n");
7048                 goto err;
7049         }
7050
7051         QL_DPRINT12(ha, "ord = %d ird=%d private_data=%p"
7052                 " private_data_len=%d rq_psn=%d\n",
7053                 conn_param->ord, conn_param->ird, conn_param->private_data,
7054                 conn_param->private_data_len, qp->rq_psn);
7055
7056         in_params.cm_info.ord = conn_param->ord;
7057         in_params.cm_info.ird = conn_param->ird;
7058         in_params.cm_info.private_data = conn_param->private_data;
7059         in_params.cm_info.private_data_len = conn_param->private_data_len;
7060         in_params.qp = qp->ecore_qp;
7061
7062         memcpy(in_params.local_mac_addr, dev->ha->primary_mac, ETH_ALEN);
7063
7064         rc = ecore_iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
7065
7066         if (rc) {
7067                 QL_DPRINT12(ha, "ecore_iwarp_connect failed\n");
7068                 goto err;
7069         }
7070
7071         QL_DPRINT12(ha, "exit\n");
7072
7073         return rc;
7074
7075 err:
7076         cm_id->rem_ref(cm_id);
7077         kfree(ep);
7078
7079         QL_DPRINT12(ha, "exit [%d]\n", rc);
7080         return rc;
7081 }
7082
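/*
 * Create an iWARP listener: allocate the listener object, bind it to the
 * cm_id's local IPv4 address and port, and register it with
 * ecore_iwarp_create_listen().
 */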
7083 int
7084 qlnxr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
7085 {
7086         struct qlnxr_dev *dev;
7087         struct qlnxr_iw_listener *listener;
7088         struct ecore_iwarp_listen_in iparams;
7089         struct ecore_iwarp_listen_out oparams;
7090         struct sockaddr_in *laddr;
7091         qlnx_host_t     *ha;
7092         int rc;
7093
7094         dev = get_qlnxr_dev((cm_id->device));
7095         ha = dev->ha;
7096
7097         QL_DPRINT12(ha, "enter\n");
7098
7099         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
7100                 return -EINVAL;
7101
7102         laddr = (struct sockaddr_in *)&cm_id->local_addr;
7103
7104         listener = kzalloc(sizeof(*listener), GFP_KERNEL);
7105
7106         if (listener == NULL) {
7107                 QL_DPRINT11(ha, "listener memory alloc failed\n");
7108                 return -ENOMEM;
7109         }
7110
7111         listener->dev = dev;
7112         cm_id->add_ref(cm_id);
7113         listener->cm_id = cm_id;
7114         listener->backlog = backlog;
7115
7116         memset(&iparams, 0, sizeof (struct ecore_iwarp_listen_in));
7117         memset(&oparams, 0, sizeof (struct ecore_iwarp_listen_out));
7118
7119         iparams.cb_context = listener;
7120         iparams.event_cb = qlnxr_iw_event_handler;
7121         iparams.max_backlog = backlog;
7122
7123         iparams.ip_version = ECORE_TCP_IPV4;
7124
7125         iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
7126         iparams.port = ntohs(laddr->sin_port);
7127         iparams.vlan = 0;
7128
7129         QL_DPRINT12(ha, "[%d.%d.%d.%d, %d] iparamsport=%d\n",
7130                 NIPQUAD((laddr->sin_addr.s_addr)),
7131                 laddr->sin_port, iparams.port);
7132
7133         rc = ecore_iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
7134         if (rc) {
7135                 QL_DPRINT11(ha,
7136                         "ecore_iwarp_create_listen failed rc = %d\n", rc);
7137                 goto err;
7138         }
7139
7140         listener->ecore_handle = oparams.handle;
7141         cm_id->provider_data = listener;
7142
7143         QL_DPRINT12(ha, "exit\n");
7144         return rc;
7145
7146 err:
7147         cm_id->rem_ref(cm_id);
7148         kfree(listener);
7149
7150         QL_DPRINT12(ha, "exit [%d]\n", rc);
7151         return rc;
7152 }
7153
7154 void
7155 qlnxr_iw_destroy_listen(struct iw_cm_id *cm_id)
7156 {
7157         struct qlnxr_iw_listener *listener = cm_id->provider_data;
7158         struct qlnxr_dev *dev = get_qlnxr_dev((cm_id->device));
7159         int rc = 0;
7160         qlnx_host_t     *ha;
7161
7162         ha = dev->ha;
7163
7164         QL_DPRINT12(ha, "enter\n");
7165
7166         if (listener->ecore_handle)
7167                 rc = ecore_iwarp_destroy_listen(dev->rdma_ctx,
7168                                 listener->ecore_handle);
7169
7170         cm_id->rem_ref(cm_id);
7171
7172         QL_DPRINT12(ha, "exit [%d]\n", rc);
7173         return;
7174 }
7175
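/*
 * Passive side accept: bind the endpoint to the QP chosen by the consumer and
 * complete the MPA exchange via ecore_iwarp_accept().
 */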
7176 int
7177 qlnxr_iw_accept(struct iw_cm_id *cm_id,
7178         struct iw_cm_conn_param *conn_param)
7179 {
7180         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)cm_id->provider_data;
7181         struct qlnxr_dev *dev = ep->dev;
7182         struct qlnxr_qp *qp;
7183         struct ecore_iwarp_accept_in params;
7184         int rc;
7185         qlnx_host_t     *ha;
7186
7187         ha = dev->ha;
7188
7189         QL_DPRINT12(ha, "enter  qpid=%d\n", conn_param->qpn);
7190
7191         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
7192                 return -EINVAL;
7193  
7194         qp = idr_find(&dev->qpidr, conn_param->qpn);
7195         if (!qp) {
7196                 QL_DPRINT11(ha, "idr_find failed invalid qpn = %d\n",
7197                         conn_param->qpn);
7198                 return -EINVAL;
7199         }
7200         ep->qp = qp;
7201         qp->ep = ep;
7202         cm_id->add_ref(cm_id);
7203         ep->cm_id = cm_id;
7204
7205         params.ep_context = ep->ecore_context;
7206         params.cb_context = ep;
7207         params.qp = ep->qp->ecore_qp;
7208         params.private_data = conn_param->private_data;
7209         params.private_data_len = conn_param->private_data_len;
7210         params.ird = conn_param->ird;
7211         params.ord = conn_param->ord;
7212
7213         rc = ecore_iwarp_accept(dev->rdma_ctx, &params);
7214         if (rc) {
7215                 QL_DPRINT11(ha, "ecore_iwarp_accept failed %d\n", rc);
7216                 goto err;
7217         }
7218
7219         QL_DPRINT12(ha, "exit\n");
7220         return 0;
7221 err:
7222         cm_id->rem_ref(cm_id);
7223         QL_DPRINT12(ha, "exit rc = %d\n", rc);
7224         return rc;
7225 }
7226
7227 int
7228 qlnxr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
7229 {
7230 #if __FreeBSD_version >= 1102000
7231
7232         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)cm_id->provider_data;
7233         struct qlnxr_dev *dev = ep->dev;
7234         struct ecore_iwarp_reject_in params;
7235         int rc;
7236
7237         params.ep_context = ep->ecore_context;
7238         params.cb_context = ep;
7239         params.private_data = pdata;
7240         params.private_data_len = pdata_len;
7241         ep->qp = NULL;
7242
7243         rc = ecore_iwarp_reject(dev->rdma_ctx, &params);
7244
7245         return rc;
7246
7247 #else
7248
7249         printf("iWARP reject_cr not implemented\n");
7250         return -EINVAL;
7251
7252 #endif /* #if __FreeBSD_version >= 1102000 */
7253 }
7254
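/*
 * QP reference counting for the iWARP CM: the last qlnxr_iw_qp_rem_ref()
 * removes the QP from the idr and frees it.
 */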
7255 void
7256 qlnxr_iw_qp_add_ref(struct ib_qp *ibqp)
7257 {
7258         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
7259         qlnx_host_t     *ha;
7260
7261         ha = qp->dev->ha;
7262
7263         QL_DPRINT12(ha, "enter ibqp = %p\n", ibqp);
7264  
7265         atomic_inc(&qp->refcnt);
7266
7267         QL_DPRINT12(ha, "exit \n");
7268         return;
7269 }
7270
7271 void
7272 qlnxr_iw_qp_rem_ref(struct ib_qp *ibqp)
7273 {
7274         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
7275         qlnx_host_t     *ha;
7276
7277         ha = qp->dev->ha;
7278
7279         QL_DPRINT12(ha, "enter ibqp = %p qp = %p\n", ibqp, qp);
7280
7281         if (atomic_dec_and_test(&qp->refcnt)) {
7282                 qlnxr_idr_remove(qp->dev, qp->qp_id);
7283                 kfree(qp);
7284         }
7285
7286         QL_DPRINT12(ha, "exit \n");
7287         return;
7288 }
7289
7290 struct ib_qp *
7291 qlnxr_iw_get_qp(struct ib_device *ibdev, int qpn)
7292 {
7293         struct qlnxr_dev *dev = get_qlnxr_dev(ibdev);
7294         struct ib_qp *qp;
7295         qlnx_host_t     *ha;
7296
7297         ha = dev->ha;
7298
7299         QL_DPRINT12(ha, "enter dev = %p ibdev = %p qpn = %d\n", dev, ibdev, qpn);
7300
7301         qp = idr_find(&dev->qpidr, qpn);
7302
7303         QL_DPRINT12(ha, "exit qp = %p\n", qp);
7304
7305         return (qp);
7306 }