1 /*
2  * Copyright (c) 2018-2019 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27
28 /*
29  * File: qlnxr_verbs.c
30  */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include "qlnxr_def.h"
35 #include "rdma_common.h"
36 #include "qlnxr_roce.h"
37 #include "qlnxr_cm.h"
38
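   /*
    * Helpers for splitting a 64-bit DMA address into the 32-bit hi/lo
    * halves carried in the firmware structures; HILO_U64() rebuilds the
    * 64-bit value from such a pair.
    */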
39 #define upper_32_bits(x) ((uint32_t)((x) >> 32))
40 #define lower_32_bits(x) ((uint32_t)(x))
41 #define HILO_U64(hi, lo)                ((((u64)(hi)) << 32) + (lo))
42
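   /*
    * The *_SET macros below fill RQ/SRQ WQE headers and SGEs in place on
    * the hardware chain, storing addresses as little-endian hi/lo pairs
    * via TYPEPTR_ADDR_SET().
    */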
43 #define TYPEPTR_ADDR_SET(type_ptr, field, vaddr)                        \
44         do {                                                            \
45                 (type_ptr)->field.hi = cpu_to_le32(upper_32_bits(vaddr));\
46                 (type_ptr)->field.lo = cpu_to_le32(lower_32_bits(vaddr));\
47         } while (0)
48
49 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)                 \
50         do {                                                    \
51                 TYPEPTR_ADDR_SET(sge, addr, vaddr);             \
52                 (sge)->length = cpu_to_le32(vlength);           \
53                 (sge)->flags = cpu_to_le32(vflags);             \
54         } while (0)
55
56 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)                       \
57         do {                                                    \
58                 TYPEPTR_ADDR_SET(hdr, wr_id, vwr_id);           \
59                 (hdr)->num_sges = num_sge;                      \
60         } while (0)
61
62 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)                 \
63         do {                                                    \
64                 TYPEPTR_ADDR_SET(sge, addr, vaddr);             \
65                 (sge)->length = cpu_to_le32(vlength);           \
66                 (sge)->l_key = cpu_to_le32(vlkey);              \
67         } while (0)
68
69 #define NIPQUAD(addr) \
70         ((unsigned char *)&addr)[0], \
71         ((unsigned char *)&addr)[1], \
72         ((unsigned char *)&addr)[2], \
73         ((unsigned char *)&addr)[3]
74
75 static int
76 qlnxr_check_srq_params(struct ib_pd *ibpd,
77         struct qlnxr_dev *dev,
78         struct ib_srq_init_attr *attrs);
79
80 static int
81 qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
82         struct qlnxr_srq *srq,
83         struct qlnxr_create_srq_ureq *ureq,
84         int access, int dmasync);
85
86 static int
87 qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
88         struct qlnxr_dev *dev,
89         struct ib_srq_init_attr *init_attr);
90
91 static int
92 qlnxr_copy_srq_uresp(struct qlnxr_dev *dev,
93         struct qlnxr_srq *srq,
94         struct ib_udata *udata);
95
96 static void
97 qlnxr_free_srq_user_params(struct qlnxr_srq *srq);
98
99 static void
100 qlnxr_free_srq_kernel_params(struct qlnxr_srq *srq);
101
102 static u32
103 qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq);
104
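    /*
     * For iWARP a single GID is synthesized from the primary MAC address;
     * for RoCE, qlnxr_query_gid() below returns entries from the driver's
     * software sgid table instead.
     */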
105 int
106 qlnxr_iw_query_gid(struct ib_device *ibdev, u8 port, int index,
107         union ib_gid *sgid)
108 {
109         struct qlnxr_dev        *dev;
110         qlnx_host_t             *ha;
111
112         dev = get_qlnxr_dev(ibdev);
113         ha = dev->ha;
114
115         QL_DPRINT12(ha, "enter\n");
116
117         memset(sgid->raw, 0, sizeof(sgid->raw));
118
119         memcpy(sgid->raw, dev->ha->primary_mac, sizeof (dev->ha->primary_mac));
120
121         QL_DPRINT12(ha, "exit\n");
122
123         return 0;
124 }
125
126 int
127 qlnxr_query_gid(struct ib_device *ibdev, u8 port, int index,
128         union ib_gid *sgid)
129 {
130         struct qlnxr_dev        *dev;
131         qlnx_host_t             *ha;
132
133         dev = get_qlnxr_dev(ibdev);
134         ha = dev->ha;
135         QL_DPRINT12(ha, "enter index: %d\n", index);
136 #if 0
137         int ret = 0;
138         /* @@@: if DEFINE_ROCE_GID_TABLE to be used here */
139         //if (!rdma_cap_roce_gid_table(ibdev, port)) {
140         if (!(rdma_protocol_roce(ibdev, port) &&
141                 ibdev->add_gid && ibdev->del_gid)) {
142                 QL_DPRINT11(ha, "acquire gid failed\n");
143                 return -ENODEV;
144         }
145
146         ret = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
147         if (ret == -EAGAIN) {
148                 memcpy(sgid, &zgid, sizeof(*sgid));
149                 return 0;
150         }
151 #endif
152         if ((index >= QLNXR_MAX_SGID) || (index < 0)) {
153                 QL_DPRINT12(ha, "invalid gid index %d\n", index);
154                 memset(sgid, 0, sizeof(*sgid));
155                 return -EINVAL;
156         }
157         memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
158
159         QL_DPRINT12(ha, "exit : %p\n", sgid);
160
161         return 0;
162 }
163
164 struct ib_srq *
165 qlnxr_create_srq(struct ib_pd *ibpd, struct ib_srq_init_attr *init_attr,
166         struct ib_udata *udata)
167 {
168         struct qlnxr_dev        *dev;
169         qlnx_host_t             *ha;
170         struct ecore_rdma_destroy_srq_in_params destroy_in_params;
171         struct ecore_rdma_create_srq_out_params out_params;
172         struct ecore_rdma_create_srq_in_params in_params;
173         u64 pbl_base_addr, phy_prod_pair_addr;
174         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
175         struct ib_ucontext *ib_ctx = NULL;
176         struct qlnxr_srq_hwq_info *hw_srq;
177         struct qlnxr_ucontext *ctx = NULL;
178         struct qlnxr_create_srq_ureq ureq;
179         u32 page_cnt, page_size;
180         struct qlnxr_srq *srq;
181         int ret = 0;
182
183         dev = get_qlnxr_dev((ibpd->device));
184         ha = dev->ha;
185
186         QL_DPRINT12(ha, "enter\n");
187
188         ret = qlnxr_check_srq_params(ibpd, dev, init_attr);
            if (ret)
                    return ERR_PTR(ret);
189
190         srq = kzalloc(sizeof(*srq), GFP_KERNEL);
191         if (!srq) {
192                 QL_DPRINT11(ha, "cannot allocate memory for srq\n");
193                 return ERR_PTR(-ENOMEM);
194         }
195
196         srq->dev = dev;
197         hw_srq = &srq->hw_srq;
198         spin_lock_init(&srq->lock);
199         memset(&in_params, 0, sizeof(in_params));
200
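            /*
             * A user-mode SRQ ring is allocated by the library and described
             * here by the PBL taken from the ureq; for a kernel SRQ an ecore
             * chain is allocated below instead.
             */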
201         if (udata && ibpd->uobject && ibpd->uobject->context) {
202                 ib_ctx = ibpd->uobject->context;
203                 ctx = get_qlnxr_ucontext(ib_ctx);
204
205                 memset(&ureq, 0, sizeof(ureq));
206                 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
207                         udata->inlen))) {
208                         QL_DPRINT11(ha, "problem"
209                                 " copying data from user space\n");
210                         goto err0;
211                 }
212
213                 ret = qlnxr_init_srq_user_params(ib_ctx, srq, &ureq, 0, 0);
214                 if (ret)
215                         goto err0;
216
217                 page_cnt = srq->usrq.pbl_info.num_pbes;
218                 pbl_base_addr = srq->usrq.pbl_tbl->pa;
219                 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
220                 // @@@ : if DEFINE_IB_UMEM_PAGE_SHIFT
221                 // page_size = BIT(srq->usrq.umem->page_shift);
222                 // else
223                 page_size = srq->usrq.umem->page_size;
224         } else {
225                 struct ecore_chain *pbl;
226                 ret = qlnxr_alloc_srq_kernel_params(srq, dev, init_attr);
227                 if (ret)
228                         goto err0;
229                 pbl = &hw_srq->pbl;
230
231                 page_cnt = ecore_chain_get_page_cnt(pbl);
232                 pbl_base_addr = ecore_chain_get_pbl_phys(pbl);
233                 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
234                 page_size = pbl->elem_per_page << 4;
235         }
236
237         in_params.pd_id = pd->pd_id;
238         in_params.pbl_base_addr = pbl_base_addr;
239         in_params.prod_pair_addr = phy_prod_pair_addr;
240         in_params.num_pages = page_cnt;
241         in_params.page_size = page_size;
242
243         ret = ecore_rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
244         if (ret)
245                 goto err1;
246
247         srq->srq_id = out_params.srq_id;
248
249         if (udata) {
250                 ret = qlnxr_copy_srq_uresp(dev, srq, udata);
251                 if (ret)
252                         goto err2;
253         }
254
255         QL_DPRINT12(ha, "created srq with srq_id = 0x%0x\n", srq->srq_id);
256         return &srq->ibsrq;
257 err2:
258         memset(&destroy_in_params, 0, sizeof(destroy_in_params));
259         destroy_in_params.srq_id = srq->srq_id;
260         ecore_rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
261
262 err1:
263         if (udata)
264                 qlnxr_free_srq_user_params(srq);
265         else
266                 qlnxr_free_srq_kernel_params(srq);
267
268 err0:
269         kfree(srq);     
270         return ERR_PTR(-EFAULT);
271 }
272
273 int
274 qlnxr_destroy_srq(struct ib_srq *ibsrq)
275 {
276         struct qlnxr_dev        *dev;
277         struct qlnxr_srq        *srq;
278         qlnx_host_t             *ha;
279         struct ecore_rdma_destroy_srq_in_params in_params;
280
281         srq = get_qlnxr_srq(ibsrq);
282         dev = srq->dev;
283         ha = dev->ha;
284
285         memset(&in_params, 0, sizeof(in_params));
286         in_params.srq_id = srq->srq_id;
287
288         ecore_rdma_destroy_srq(dev->rdma_ctx, &in_params);
289
290         if (ibsrq->pd->uobject && ibsrq->pd->uobject->context)
291                 qlnxr_free_srq_user_params(srq);
292         else
293                 qlnxr_free_srq_kernel_params(srq);
294
295         QL_DPRINT12(ha, "destroyed srq_id=0x%0x\n", srq->srq_id);
296         kfree(srq);
297         return 0;
298 }
299
300 int
301 qlnxr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
302         enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
303 {
304         struct qlnxr_dev        *dev;
305         struct qlnxr_srq        *srq;
306         qlnx_host_t             *ha;
307         struct ecore_rdma_modify_srq_in_params in_params;
308         int ret = 0;
309
310         srq = get_qlnxr_srq(ibsrq);
311         dev = srq->dev;
312         ha = dev->ha;
313
314         QL_DPRINT12(ha, "enter\n");
315         if (attr_mask & IB_SRQ_MAX_WR) {
316                 QL_DPRINT12(ha, "invalid attribute mask=0x%x"
317                         " specified for %p\n", attr_mask, srq);
318                 return -EINVAL;
319         }
320
321         if (attr_mask & IB_SRQ_LIMIT) {
322                 if (attr->srq_limit >= srq->hw_srq.max_wr) {
323                         QL_DPRINT12(ha, "invalid srq_limit=0x%x"
324                                 " (max_srq_limit = 0x%x)\n",
325                                attr->srq_limit, srq->hw_srq.max_wr);
326                         return -EINVAL; 
327                 }
328                 memset(&in_params, 0, sizeof(in_params));
329                 in_params.srq_id = srq->srq_id;
330                 in_params.wqe_limit = attr->srq_limit;
331                 ret = ecore_rdma_modify_srq(dev->rdma_ctx, &in_params);
332                 if (ret)
333                         return ret;
334         }
335
336         QL_DPRINT12(ha, "modified srq with srq_id = 0x%0x\n", srq->srq_id);
337         return 0;
338 }
339
340 int
341 qlnxr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
342 {
343         struct qlnxr_dev        *dev;
344         struct qlnxr_srq        *srq;
345         qlnx_host_t             *ha;
346         struct ecore_rdma_device *qattr;
347         srq = get_qlnxr_srq(ibsrq);
348         dev = srq->dev;
349         ha = dev->ha;
350         //qattr = &dev->attr;
351         qattr = ecore_rdma_query_device(dev->rdma_ctx);
352         QL_DPRINT12(ha, "enter\n");
353
354         if (!dev->rdma_ctx) {
355                 QL_DPRINT12(ha, "called with invalid params"
356                         " rdma_ctx is NULL\n");
357                 return -EINVAL;
358         }
359
360         srq_attr->srq_limit = qattr->max_srq;
361         srq_attr->max_wr = qattr->max_srq_wr;
362         srq_attr->max_sge = qattr->max_sge;
363
364         QL_DPRINT12(ha, "exit\n");
365         return 0;
366 }
367
368 /* Increment srq wr producer by one */
369 static
370 void qlnxr_inc_srq_wr_prod (struct qlnxr_srq_hwq_info *info)
371 {
372         info->wr_prod_cnt++;
373 }
374
375 /* Increment srq wr consumer by one */
376 static 
377 void qlnxr_inc_srq_wr_cons(struct qlnxr_srq_hwq_info *info)
378 {
379         info->wr_cons_cnt++;
380 }
381
382 /* get_port_immutable verb is not available in FreeBSD */
383 #if 0
384 int
385 qlnxr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
386         struct ib_port_immutable *immutable)
387 {
388         struct qlnxr_dev                *dev;
389         qlnx_host_t                     *ha;
390         dev = get_qlnxr_dev(ibdev);
391         ha = dev->ha;
392
393         QL_DPRINT12(ha, "entered but not implemented!!!\n");
394 }
395 #endif
396
397 int
398 qlnxr_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
399         struct ib_recv_wr **bad_wr)
400 {
401         struct qlnxr_dev        *dev;
402         struct qlnxr_srq        *srq;
403         qlnx_host_t             *ha;
404         struct qlnxr_srq_hwq_info *hw_srq;
405         struct ecore_chain *pbl;
406         unsigned long flags;
407         int status = 0;
408         u32 num_sge, offset;
409
410         srq = get_qlnxr_srq(ibsrq);
411         dev = srq->dev;
412         ha = dev->ha;
413         hw_srq = &srq->hw_srq;
414
415         QL_DPRINT12(ha, "enter\n");
416         spin_lock_irqsave(&srq->lock, flags);
417
418         pbl = &srq->hw_srq.pbl;
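            /*
             * Each WR consumes one chain element for the header plus one per
             * SGE.  The first wmb() below orders the SGE writes before the
             * producer update; the second flushes the updated producer pair
             * for the device.
             */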
419         while (wr) {
420                 struct rdma_srq_wqe_header *hdr;
421                 int i;
422
423                 if (!qlnxr_srq_elem_left(hw_srq) ||
424                     wr->num_sge > srq->hw_srq.max_sges) {
425                         QL_DPRINT11(ha, "WR cannot be posted"
426                             " (%d, %d) || (%d > %d)\n",
427                             hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
428                             wr->num_sge, srq->hw_srq.max_sges);
429                         status = -ENOMEM;
430                         *bad_wr = wr;
431                         break;
432                 }
433
434                 hdr = ecore_chain_produce(pbl);
435                 num_sge = wr->num_sge;
436                 /* Set number of sge and WR id in header */
437                 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
438
439                 /* The PBL is maintained at WR granularity, so advance
440                  * the WR producer once for each WR that is posted.
441                  */
442                 qlnxr_inc_srq_wr_prod(hw_srq);
443                 hw_srq->wqe_prod++;
444                 hw_srq->sge_prod++;
445
446                 QL_DPRINT12(ha, "SRQ WR : SGEs: %d with wr_id[%d] = %llx\n",
447                         wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
448
449                 for (i = 0; i < wr->num_sge; i++) {
450                         struct rdma_srq_sge *srq_sge = 
451                             ecore_chain_produce(pbl);
452                         /* Set SGE length, lkey and address */
453                         SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
454                                 wr->sg_list[i].length, wr->sg_list[i].lkey);
455
456                         QL_DPRINT12(ha, "[%d]: len %d, key %x, addr %x:%x\n",
457                                 i, srq_sge->length, srq_sge->l_key,
458                                 srq_sge->addr.hi, srq_sge->addr.lo);
459                         hw_srq->sge_prod++;
460                 }
461                 wmb();
462                 /*
463                  * The SRQ producer pair is 8 bytes: the SGE producer index
464                  * goes in the first 4 bytes and the WQE producer index in
465                  * the next 4 bytes.
466                  */
467                 *(srq->hw_srq.virt_prod_pair_addr) = hw_srq->sge_prod;
468                 offset = offsetof(struct rdma_srq_producers, wqe_prod);
469                 *(u32 *)((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
470                         hw_srq->wqe_prod;
471                 /* Flush prod after updating it */
472                 wmb();
473                 wr = wr->next;
474         }       
475
476         QL_DPRINT12(ha, "Elements in SRQ: %d\n",
477                 ecore_chain_get_elem_left(pbl));
478
479         spin_unlock_irqrestore(&srq->lock, flags);      
480         QL_DPRINT12(ha, "exit\n");
481         return status;
482 }
483
484 int
485 #if __FreeBSD_version < 1102000
486 qlnxr_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
487 #else
488 qlnxr_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
489         struct ib_udata *udata)
490 #endif /* #if __FreeBSD_version < 1102000 */
491
492 {
493         struct qlnxr_dev                *dev;
494         struct ecore_rdma_device        *qattr;
495         qlnx_host_t                     *ha;
496
497         dev = get_qlnxr_dev(ibdev);
498         ha = dev->ha;
499
500         QL_DPRINT12(ha, "enter\n");
501
502 #if __FreeBSD_version > 1102000
503         if (udata->inlen || udata->outlen)
504                 return -EINVAL;
505 #endif /* #if __FreeBSD_version > 1102000 */
506
507         if (dev->rdma_ctx == NULL) {
508                 return -EINVAL;
509         }
510
511         qattr = ecore_rdma_query_device(dev->rdma_ctx);
512
513         memset(attr, 0, sizeof *attr);
514
515         attr->fw_ver = qattr->fw_ver;
516         attr->sys_image_guid = qattr->sys_image_guid;
517         attr->max_mr_size = qattr->max_mr_size;
518         attr->page_size_cap = qattr->page_size_caps;
519         attr->vendor_id = qattr->vendor_id;
520         attr->vendor_part_id = qattr->vendor_part_id;
521         attr->hw_ver = qattr->hw_ver;
522         attr->max_qp = qattr->max_qp;
523         attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
524                                         IB_DEVICE_RC_RNR_NAK_GEN |
525                                         IB_DEVICE_LOCAL_DMA_LKEY |
526                                         IB_DEVICE_MEM_MGT_EXTENSIONS;
527
528         attr->max_sge = qattr->max_sge;
529         attr->max_sge_rd = qattr->max_sge;
530         attr->max_cq = qattr->max_cq;
531         attr->max_cqe = qattr->max_cqe;
532         attr->max_mr = qattr->max_mr;
533         attr->max_mw = qattr->max_mw;
534         attr->max_pd = qattr->max_pd;
535         attr->atomic_cap = dev->atomic_cap;
536         attr->max_fmr = qattr->max_fmr;
537         attr->max_map_per_fmr = 16; /* TBD: FMR */
538
539         /* There is an implicit assumption in some of the ib_xxx apps that
540          * qp_rd_atom is smaller than qp_init_rd_atom. Specifically, during
541          * connection setup qp_rd_atom is passed to the other side and used
542          * as init_rd_atom without checking the device capabilities for
543          * init_rd_atom. For this reason, we set qp_rd_atom to be the minimum
544          * of the two. There is an additional assumption in the mlx4 driver
545          * that the values are powers of two; fls is performed on the value - 1,
546          * which in fact gives a larger power of two for values that are not a
547          * power of two. This should be fixed in the mlx4 driver, but until
548          * then we provide a value that is a power of two in our code.
549          */
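            /* For example, a reported limit of 24 gives fls(24) = 5, so
             * 1 << (5 - 1) = 16, the largest power of two not exceeding 24.
             */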
550         attr->max_qp_init_rd_atom =
551                 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
552         attr->max_qp_rd_atom =
553                 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
554                     attr->max_qp_init_rd_atom);
555
556         attr->max_srq = qattr->max_srq;
557         attr->max_srq_sge = qattr->max_srq_sge;
558         attr->max_srq_wr = qattr->max_srq_wr;
559
560         /* TODO: R&D to more properly configure the following */
561         attr->local_ca_ack_delay = qattr->dev_ack_delay;
562         attr->max_fast_reg_page_list_len = qattr->max_mr/8;
563         attr->max_pkeys = QLNXR_ROCE_PKEY_MAX;
564         attr->max_ah = qattr->max_ah;
565
566         QL_DPRINT12(ha, "exit\n");
567         return 0;
568 }
569
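    /*
     * Map an Ethernet link speed in Mb/s to the closest IB speed/width pair
     * reported through query_port; some combinations are only approximate
     * (e.g. 50G is reported as QDR x4 = 40G, as the TODO below notes).
     */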
570 static inline void
571 get_link_speed_and_width(int speed, uint8_t *ib_speed, uint8_t *ib_width)
572 {
573         switch (speed) {
574         case 1000:
575                 *ib_speed = IB_SPEED_SDR;
576                 *ib_width = IB_WIDTH_1X;
577                 break;
578         case 10000:
579                 *ib_speed = IB_SPEED_QDR;
580                 *ib_width = IB_WIDTH_1X;
581                 break;
582
583         case 20000:
584                 *ib_speed = IB_SPEED_DDR;
585                 *ib_width = IB_WIDTH_4X;
586                 break;
587
588         case 25000:
589                 *ib_speed = IB_SPEED_EDR;
590                 *ib_width = IB_WIDTH_1X;
591                 break;
592
593         case 40000:
594                 *ib_speed = IB_SPEED_QDR;
595                 *ib_width = IB_WIDTH_4X;
596                 break;
597
598         case 50000:
599                 *ib_speed = IB_SPEED_QDR;
600                 *ib_width = IB_WIDTH_4X; // TODO doesn't add up to 50...
601                 break;
602
603         case 100000:
604                 *ib_speed = IB_SPEED_EDR;
605                 *ib_width = IB_WIDTH_4X;
606                 break;
607
608         default:
609                 /* Unsupported */
610                 *ib_speed = IB_SPEED_SDR;
611                 *ib_width = IB_WIDTH_1X;
612         }
613         return;
614 }
615
616 int
617 qlnxr_query_port(struct ib_device *ibdev, uint8_t port,
618         struct ib_port_attr *attr)
619 {
620         struct qlnxr_dev        *dev;
621         struct ecore_rdma_port  *rdma_port;
622         qlnx_host_t             *ha;
623
624         dev = get_qlnxr_dev(ibdev);
625         ha = dev->ha;
626
627         QL_DPRINT12(ha, "enter\n");
628
629         if (port > 1) {
630                 QL_DPRINT12(ha, "port [%d] > 1 \n", port);
631                 return -EINVAL;
632         }
633
634         if (dev->rdma_ctx == NULL) {
635                 QL_DPRINT12(ha, "rdma_ctx == NULL\n");
636                 return -EINVAL;
637         }
638
639         rdma_port = ecore_rdma_query_port(dev->rdma_ctx);
640         memset(attr, 0, sizeof *attr);
641
642         if (rdma_port->port_state == ECORE_RDMA_PORT_UP) {
643                 attr->state = IB_PORT_ACTIVE;
644                 attr->phys_state = 5;
645         } else {
646                 attr->state = IB_PORT_DOWN;
647                 attr->phys_state = 3;
648         }
649
650         attr->max_mtu = IB_MTU_4096;
651         attr->active_mtu = iboe_get_mtu(dev->ha->ifp->if_mtu);
652         attr->lid = 0;
653         attr->lmc = 0;
654         attr->sm_lid = 0;
655         attr->sm_sl = 0;
656         attr->port_cap_flags = 0;
657
658         if (QLNX_IS_IWARP(dev)) {
659                 attr->gid_tbl_len = 1;
660                 attr->pkey_tbl_len = 1;
661         } else {
662                 attr->gid_tbl_len = QLNXR_MAX_SGID;
663                 attr->pkey_tbl_len = QLNXR_ROCE_PKEY_TABLE_LEN;
664         }
665
666         attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
667         attr->qkey_viol_cntr = 0;
668
669         get_link_speed_and_width(rdma_port->link_speed,
670                                  &attr->active_speed, &attr->active_width);
671
672         attr->max_msg_sz = rdma_port->max_msg_size;
673         attr->max_vl_num = 4; /* TODO -> figure this one out... */
674
675         QL_DPRINT12(ha, "state = %d phys_state = %d "
676                 " link_speed = %d active_speed = %d active_width = %d"
677                 " attr->gid_tbl_len = %d attr->pkey_tbl_len = %d"
678                 " max_msg_sz = 0x%x max_vl_num = 0x%x \n",
679                 attr->state, attr->phys_state,
680                 rdma_port->link_speed, attr->active_speed,
681                 attr->active_width, attr->gid_tbl_len, attr->pkey_tbl_len,
682                 attr->max_msg_sz, attr->max_vl_num);
683
684         QL_DPRINT12(ha, "exit\n");
685         return 0;
686 }
687
688 int
689 qlnxr_modify_port(struct ib_device *ibdev, uint8_t port, int mask,
690         struct ib_port_modify *props)
691 {
692         struct qlnxr_dev        *dev;
693         qlnx_host_t             *ha;
694
695         dev = get_qlnxr_dev(ibdev);
696         ha = dev->ha;
697
698         QL_DPRINT12(ha, "enter\n");
699
700         if (port > 1) {
701                 QL_DPRINT12(ha, "port (%d) > 1\n", port);
702                 return -EINVAL;
703         }
704
705         QL_DPRINT12(ha, "exit\n");
706         return 0;
707 }
708
709 enum rdma_link_layer
710 qlnxr_link_layer(struct ib_device *ibdev, uint8_t port_num)
711 {
712         struct qlnxr_dev        *dev;
713         qlnx_host_t             *ha;
714
715         dev = get_qlnxr_dev(ibdev);
716         ha = dev->ha;
717
718         QL_DPRINT12(ha, "ibdev = %p port_num = 0x%x\n", ibdev, port_num);
719
720         return IB_LINK_LAYER_ETHERNET;
721 }
722
723 struct ib_pd *
724 qlnxr_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context,
725         struct ib_udata *udata)
726 {
727         struct qlnxr_pd         *pd = NULL;
728         u16                     pd_id;
729         int                     rc;
730         struct qlnxr_dev        *dev;
731         qlnx_host_t             *ha;
732
733         dev = get_qlnxr_dev(ibdev);
734         ha = dev->ha;
735
736         QL_DPRINT12(ha, "ibdev = %p context = %p"
737                 " udata = %p enter\n", ibdev, context, udata);
738
739         if (dev->rdma_ctx == NULL) {
740                 QL_DPRINT11(ha, "dev->rdma_ctx = NULL\n");
741                 rc = -1;
742                 goto err;
743         }
744
745         pd = kzalloc(sizeof(*pd), GFP_KERNEL);
746         if (!pd) {
747                 rc = -ENOMEM;
748                 QL_DPRINT11(ha, "kzalloc(pd) = NULL\n");
749                 goto err;
750         }
751
752         rc = ecore_rdma_alloc_pd(dev->rdma_ctx, &pd_id);
753         if (rc) {
754                 QL_DPRINT11(ha, "ecore_rdma_alloc_pd failed\n");
755                 goto err;
756         }
757
758         pd->pd_id = pd_id;
759
760         if (udata && context) {
761                 rc = ib_copy_to_udata(udata, &pd->pd_id, sizeof(pd->pd_id));
762                 if (rc) {
763                         QL_DPRINT11(ha, "ib_copy_to_udata failed\n");
764                         ecore_rdma_free_pd(dev->rdma_ctx, pd_id);
765                         goto err;
766                 }
767
768                 pd->uctx = get_qlnxr_ucontext(context);
769                 pd->uctx->pd = pd;
770         }
771
772         atomic_add_rel_32(&dev->pd_count, 1);
773         QL_DPRINT12(ha, "exit [pd, pd_id, pd_count] = [%p, 0x%x, %d]\n",
774                 pd, pd_id, dev->pd_count);
775
776         return &pd->ibpd;
777
778 err:
779         kfree(pd);
780         QL_DPRINT12(ha, "exit -1\n");
781         return ERR_PTR(rc);
782 }
783
784 int
785 qlnxr_dealloc_pd(struct ib_pd *ibpd)
786 {
787         struct qlnxr_pd         *pd;
788         struct qlnxr_dev        *dev;
789         qlnx_host_t             *ha;
790
791         pd = get_qlnxr_pd(ibpd);
792         dev = get_qlnxr_dev((ibpd->device));
793         ha = dev->ha;
794
795         QL_DPRINT12(ha, "enter\n");
796
797         if (pd == NULL) {
798                 QL_DPRINT11(ha, "pd = NULL\n");
799         } else {
800                 ecore_rdma_free_pd(dev->rdma_ctx, pd->pd_id);
801                 kfree(pd);
802                 atomic_subtract_rel_32(&dev->pd_count, 1);
803                 QL_DPRINT12(ha, "exit [pd, pd_id, pd_count] = [%p, 0x%x, %d]\n",
804                         pd, pd->pd_id, dev->pd_count);
805         }
806
807         QL_DPRINT12(ha, "exit\n");
808         return 0;
809 }
810
811 #define ROCE_WQE_ELEM_SIZE      sizeof(struct rdma_sq_sge)
812 #define RDMA_MAX_SGE_PER_SRQ    (4) /* Should be part of HSI */
813 /* Should be part of HSI */
814 #define RDMA_MAX_SRQ_WQE_SIZE   (RDMA_MAX_SGE_PER_SRQ + 1) /* +1 for header */
815 #define DB_ADDR_SHIFT(addr)             ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
816
817 static void qlnxr_cleanup_user(struct qlnxr_dev *, struct qlnxr_qp *);
818 static void qlnxr_cleanup_kernel(struct qlnxr_dev *, struct qlnxr_qp *);
819
820 int
821 qlnxr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
822 {
823         struct qlnxr_dev        *dev;
824         qlnx_host_t             *ha;
825
826         dev = get_qlnxr_dev(ibdev);
827         ha = dev->ha;
828
829         QL_DPRINT12(ha, "enter index = 0x%x\n", index);
830
831         if (index >= QLNXR_ROCE_PKEY_TABLE_LEN)
832                 return -EINVAL;
833
834         *pkey = QLNXR_ROCE_PKEY_DEFAULT;
835
836         QL_DPRINT12(ha, "exit\n");
837         return 0;
838 }
839
840 static inline bool
841 qlnxr_get_vlan_id_qp(qlnx_host_t *ha, struct ib_qp_attr *attr, int attr_mask,
842        u16 *vlan_id)
843 {
844         bool ret = false;
845
846         QL_DPRINT12(ha, "enter \n");
847
848         *vlan_id = 0;
849
850 #if __FreeBSD_version >= 1100000
851         u16 tmp_vlan_id;
852
853 #if __FreeBSD_version >= 1102000
854         union ib_gid *dgid;
855
856         dgid = &attr->ah_attr.grh.dgid;
857         tmp_vlan_id = (dgid->raw[11] << 8) | dgid->raw[12];
858
859         if (!(tmp_vlan_id & ~EVL_VLID_MASK)) {
860                 *vlan_id = tmp_vlan_id;
861                 ret = true;
862         }
863 #else
864         tmp_vlan_id = attr->vlan_id;
865
866         if ((attr_mask & IB_QP_VID) && (!(tmp_vlan_id & ~EVL_VLID_MASK))) {
867                 *vlan_id = tmp_vlan_id;
868                 ret = true;
869         }
870
871 #endif /* #if __FreeBSD_version > 1102000 */
872
873 #else
874         ret = true;
875
876 #endif /* #if __FreeBSD_version >= 1100000 */
877
878         QL_DPRINT12(ha, "exit vlan_id = 0x%x ret = %d \n", *vlan_id, ret);
879
880         return (ret);
881 }
882
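    /*
     * Copy the source and destination GIDs into the modify-QP parameters,
     * extract the VLAN id, and convert the GID dwords from network to host
     * byte order for the ecore layer.
     */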
883 static inline void
884 get_gid_info(struct ib_qp *ibqp, struct ib_qp_attr *attr,
885         int attr_mask,
886         struct qlnxr_dev *dev,
887         struct qlnxr_qp *qp,
888         struct ecore_rdma_modify_qp_in_params *qp_params)
889 {
890         int             i;
891         qlnx_host_t     *ha;
892
893         ha = dev->ha;
894
895         QL_DPRINT12(ha, "enter\n");
896
897         memcpy(&qp_params->sgid.bytes[0],
898                &dev->sgid_tbl[qp->sgid_idx].raw[0],
899                sizeof(qp_params->sgid.bytes));
900         memcpy(&qp_params->dgid.bytes[0],
901                &attr->ah_attr.grh.dgid.raw[0],
902                sizeof(qp_params->dgid));
903
904         qlnxr_get_vlan_id_qp(ha, attr, attr_mask, &qp_params->vlan_id);
905
906         for (i = 0; i < (sizeof(qp_params->sgid.dwords)/sizeof(uint32_t)); i++) {
907                 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
908                 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
909         }
910
911         QL_DPRINT12(ha, "exit\n");
912         return;
913 }
914
915 static int
916 qlnxr_add_mmap(struct qlnxr_ucontext *uctx, u64 phy_addr, unsigned long len)
917 {
918         struct qlnxr_mm *mm;
919         qlnx_host_t     *ha;
920
921         ha = uctx->dev->ha;
922
923         QL_DPRINT12(ha, "enter\n");
924
925         mm = kzalloc(sizeof(*mm), GFP_KERNEL);
926         if (mm == NULL) {
927                 QL_DPRINT11(ha, "mm = NULL\n");
928                 return -ENOMEM;
929         }
930
931         mm->key.phy_addr = phy_addr;
932
933         /* This function might be called with a length which is not a multiple
934          * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
935          * forces this granularity by increasing the requested size if needed.
936          * When qlnxr_mmap is called, it will search the list with the updated
937          * length as a key. To prevent search failures, the length is rounded up
938          * in advance to PAGE_SIZE.
939          */
940         mm->key.len = roundup(len, PAGE_SIZE);
941         INIT_LIST_HEAD(&mm->entry);
942
943         mutex_lock(&uctx->mm_list_lock);
944         list_add(&mm->entry, &uctx->mm_head);
945         mutex_unlock(&uctx->mm_list_lock);
946
947         QL_DPRINT12(ha, "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
948                 (unsigned long long)mm->key.phy_addr,
949                 (unsigned long)mm->key.len, uctx);
950
951         return 0;
952 }
953
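    /*
     * Used by qlnxr_mmap() to verify that the requested offset/length pair
     * was previously registered with qlnxr_add_mmap() (e.g. the doorbell
     * window registered in qlnxr_alloc_ucontext()).
     */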
954 static bool
955 qlnxr_search_mmap(struct qlnxr_ucontext *uctx, u64 phy_addr, unsigned long len)
956 {
957         bool            found = false;
958         struct qlnxr_mm *mm;
959         qlnx_host_t     *ha;
960
961         ha = uctx->dev->ha;
962
963         QL_DPRINT12(ha, "enter\n");
964
965         mutex_lock(&uctx->mm_list_lock);
966         list_for_each_entry(mm, &uctx->mm_head, entry) {
967                 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
968                         continue;
969
970                 found = true;
971                 break;
972         }
973         mutex_unlock(&uctx->mm_list_lock);
974
975         QL_DPRINT12(ha,
976                 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, found=%d\n",
977                 mm->key.phy_addr, mm->key.len, uctx, found);
978
979         return found;
980 }
981
982 struct
983 ib_ucontext *qlnxr_alloc_ucontext(struct ib_device *ibdev,
984                 struct ib_udata *udata)
985 {
986         int rc;
987         struct qlnxr_ucontext *ctx;
988         struct qlnxr_alloc_ucontext_resp uresp;
989         struct qlnxr_dev *dev = get_qlnxr_dev(ibdev);
990         qlnx_host_t *ha = dev->ha;
991         struct ecore_rdma_add_user_out_params oparams;
992
993         if (!udata) {
994                 return ERR_PTR(-EFAULT);
995         }
996
997         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
998         if (!ctx)
999                 return ERR_PTR(-ENOMEM);
1000
1001         rc = ecore_rdma_add_user(dev->rdma_ctx, &oparams);
1002         if (rc) {
1003                 QL_DPRINT12(ha,
1004                         "Failed to allocate a DPI for a new RoCE application, "
1005                         "rc = %d. To overcome this, consider increasing "
1006                         "the number of DPIs, increasing the doorbell BAR size, "
1007                         "or closing unnecessary RoCE applications. To "
1008                         "increase the number of DPIs consult the "
1009                         "README\n", rc);
1010                 goto err;
1011         }
1012
1013         ctx->dpi = oparams.dpi;
1014         ctx->dpi_addr = oparams.dpi_addr;
1015         ctx->dpi_phys_addr = oparams.dpi_phys_addr;
1016         ctx->dpi_size = oparams.dpi_size;
1017         INIT_LIST_HEAD(&ctx->mm_head);
1018         mutex_init(&ctx->mm_list_lock);
1019
1020         memset(&uresp, 0, sizeof(uresp));
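             /*
              * The offsetof() < udata->outlen checks below avoid reporting
              * fields that lie beyond the end of the caller's response buffer
              * (presumably an older, shorter qlnxr_alloc_ucontext_resp from
              * user space).
              */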
1021         uresp.dpm_enabled = offsetof(struct qlnxr_alloc_ucontext_resp, dpm_enabled)
1022                                 < udata->outlen ? dev->user_dpm_enabled : 0; //TODO: figure this out
1023         uresp.wids_enabled = offsetof(struct qlnxr_alloc_ucontext_resp, wids_enabled)
1024                                 < udata->outlen ? 1 : 0; //TODO: figure this out
1025         uresp.wid_count = offsetof(struct qlnxr_alloc_ucontext_resp, wid_count)
1026                                 < udata->outlen ? oparams.wid_count : 0; //TODO: figure this out 
1027         uresp.db_pa = ctx->dpi_phys_addr;
1028         uresp.db_size = ctx->dpi_size;
1029         uresp.max_send_wr = dev->attr.max_sqe;
1030         uresp.max_recv_wr = dev->attr.max_rqe;
1031         uresp.max_srq_wr = dev->attr.max_srq_wr;
1032         uresp.sges_per_send_wr = QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
1033         uresp.sges_per_recv_wr = QLNXR_MAX_RQE_ELEMENTS_PER_RQE;
1034         uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
1035         uresp.max_cqes = QLNXR_MAX_CQES;
1036
1037         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1038         if (rc)
1039                 goto err;
1040
1041         ctx->dev = dev;
1042
1043         rc = qlnxr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
1044         if (rc)
1045                 goto err;
1046         QL_DPRINT12(ha, "Allocated user context %p\n",
1047                 &ctx->ibucontext);
1048
1049         return &ctx->ibucontext;
1050 err:
1051         kfree(ctx);
1052         return ERR_PTR(rc);
1053 }
1054
1055 int
1056 qlnxr_dealloc_ucontext(struct ib_ucontext *ibctx)
1057 {
1058         struct qlnxr_ucontext *uctx = get_qlnxr_ucontext(ibctx);
1059         struct qlnxr_dev *dev = uctx->dev;
1060         qlnx_host_t *ha = dev->ha;
1061         struct qlnxr_mm *mm, *tmp;
1062         int status = 0;
1063
1064         QL_DPRINT12(ha, "Deallocating user context %p\n",
1065                         uctx);
1066
1067         if (dev) {
1068                 ecore_rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
1069         }
1070
1071         list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
1072                 QL_DPRINT12(ha, "deleted addr= 0x%llx, len = 0x%lx for"
1073                                 " ctx=%p\n",
1074                                 mm->key.phy_addr, mm->key.len, uctx);
1075                 list_del(&mm->entry);
1076                 kfree(mm);
1077         }
1078         kfree(uctx);
1079         return status;
1080 }
1081
1082 int
1083 qlnxr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1084 {
1085         struct qlnxr_ucontext   *ucontext = get_qlnxr_ucontext(context);
1086         struct qlnxr_dev        *dev = get_qlnxr_dev((context->device));
1087         unsigned long           vm_page = vma->vm_pgoff << PAGE_SHIFT;
1088         u64                     unmapped_db;
1089         unsigned long           len = (vma->vm_end - vma->vm_start);
1090         int                     rc = 0;
1091         bool                    found;
1092         qlnx_host_t             *ha;
1093
1094         ha = dev->ha;
1095
1096 #if __FreeBSD_version > 1102000
1097         unmapped_db = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
1098 #else
1099         unmapped_db = dev->db_phys_addr;
1100 #endif /* #if __FreeBSD_version > 1102000 */
1101
1102         QL_DPRINT12(ha, "qedr_mmap enter vm_page=0x%lx"
1103                 " vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
1104                 vm_page, vma->vm_pgoff, unmapped_db,
1105                 dev->db_size, len);
1106
1107         if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
1108                 QL_DPRINT11(ha, "Vma_start not page aligned "
1109                         "vm_start = %ld vma_end = %ld\n", vma->vm_start,
1110                         vma->vm_end);
1111                 return -EINVAL;
1112         }
1113
1114         found = qlnxr_search_mmap(ucontext, vm_page, len);
1115         if (!found) {
1116                 QL_DPRINT11(ha, "Vma_pgoff not found in mapped array = %ld\n",
1117                         vma->vm_pgoff);
1118                 return -EINVAL;
1119         }
1120
1121         QL_DPRINT12(ha, "Mapping doorbell bar\n");
1122
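             /*
              * On newer kernels the request must fall entirely within this
              * context's DPI window and must not be mapped readable; on older
              * kernels either the doorbell bar or the chain memory is mapped,
              * depending on the offset.
              */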
1123 #if __FreeBSD_version > 1102000
1124
1125         if ((vm_page < unmapped_db) ||
1126                 ((vm_page + len) > (unmapped_db + ucontext->dpi_size))) {
1127                 QL_DPRINT11(ha, "failed pages are outside of dpi;"
1128                         "page address=0x%lx, unmapped_db=0x%lx, dpi_size=0x%x\n",
1129                         vm_page, unmapped_db, ucontext->dpi_size);
1130                 return -EINVAL;
1131         }
1132
1133         if (vma->vm_flags & VM_READ) {
1134                 QL_DPRINT11(ha, "failed mmap, cannot map doorbell bar for read\n");
1135                 return -EINVAL;
1136         }
1137
1138         vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1139         rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
1140                         vma->vm_page_prot);
1141
1142 #else
1143
1144         if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
1145                 dev->db_size))) {
1146                 QL_DPRINT12(ha, "Mapping doorbell bar\n");
1147
1148                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1149
1150                 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1151                                             PAGE_SIZE, vma->vm_page_prot);
1152         } else {
1153                 QL_DPRINT12(ha, "Mapping chains\n");
1154                 rc = io_remap_pfn_range(vma, vma->vm_start,
1155                                          vma->vm_pgoff, len, vma->vm_page_prot);
1156         }
1157
1158 #endif /* #if __FreeBSD_version > 1102000 */
1159
1160         QL_DPRINT12(ha, "exit [%d]\n", rc);
1161         return rc;
1162 }
1163
1164 struct ib_mr *
1165 qlnxr_get_dma_mr(struct ib_pd *ibpd, int acc)
1166 {
1167         struct qlnxr_mr         *mr;
1168         struct qlnxr_dev        *dev = get_qlnxr_dev((ibpd->device));
1169         struct qlnxr_pd         *pd = get_qlnxr_pd(ibpd);
1170         int                     rc;
1171         qlnx_host_t             *ha;
1172
1173         ha = dev->ha;
1174
1175         QL_DPRINT12(ha, "enter\n");
1176
1177         if (acc & IB_ACCESS_MW_BIND) {
1178                 QL_DPRINT12(ha, "Unsupported access flags received for dma mr\n");
1179         }
1180
1181         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1182         if (!mr) {
1183                 rc = -ENOMEM;
1184                 QL_DPRINT12(ha, "kzalloc(mr) failed %d\n", rc);
1185                 goto err0;
1186         }
1187
1188         mr->type = QLNXR_MR_DMA;
1189
1190         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
1191         if (rc) {
1192                 QL_DPRINT12(ha, "ecore_rdma_alloc_tid failed %d\n", rc);
1193                 goto err1;
1194         }
1195
1196         /* index only, 18 bit long, lkey = itid << 8 | key */
1197         mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
1198         mr->hw_mr.pd = pd->pd_id;
1199         mr->hw_mr.local_read = 1;
1200         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
1201         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
1202         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1203         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
1204         mr->hw_mr.dma_mr = true;
1205
1206         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
1207         if (rc) {
1208                 QL_DPRINT12(ha, "ecore_rdma_register_tid failed %d\n", rc);
1209                 goto err2;
1210         }
1211
1212         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1213
1214         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
1215                 mr->hw_mr.remote_atomic) {
1216                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1217         }
1218
1219         QL_DPRINT12(ha, "lkey = %x\n", mr->ibmr.lkey);
1220
1221         return &mr->ibmr;
1222
1223 err2:
1224         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
1225 err1:
1226         kfree(mr);
1227 err0:
1228         QL_DPRINT12(ha, "exit [%d]\n", rc);
1229
1230         return ERR_PTR(rc);
1231 }
1232
1233 static void
1234 qlnxr_free_pbl(struct qlnxr_dev *dev, struct qlnxr_pbl_info *pbl_info,
1235         struct qlnxr_pbl *pbl)
1236 {
1237         int             i;
1238         qlnx_host_t     *ha;
1239
1240         ha = dev->ha;
1241
1242         QL_DPRINT12(ha, "enter\n");
1243
1244         for (i = 0; i < pbl_info->num_pbls; i++) {
1245                 if (!pbl[i].va)
1246                         continue;
1247                 qlnx_dma_free_coherent(&dev->ha->cdev, pbl[i].va, pbl[i].pa,
1248                         pbl_info->pbl_size);
1249         }
1250         kfree(pbl);
1251
1252         QL_DPRINT12(ha, "exit\n");
1253         return;
1254 }
1255
1256 #define MIN_FW_PBL_PAGE_SIZE (4*1024)
1257 #define MAX_FW_PBL_PAGE_SIZE (64*1024)
1258
1259 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
1260 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
1261 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE*MAX_PBES_ON_PAGE)
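     /*
      * With 8-byte PBEs this gives 512 entries on a 4KB page and 8192 on a
      * 64KB page, so a two-layer table can describe at most
      * 8192 * 8192 = 67,108,864 pages.
      */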
1262
1263 static struct qlnxr_pbl *
1264 qlnxr_alloc_pbl_tbl(struct qlnxr_dev *dev,
1265         struct qlnxr_pbl_info *pbl_info, gfp_t flags)
1266 {
1267         void                    *va;
1268         dma_addr_t              pa;
1269         dma_addr_t              *pbl_main_tbl;
1270         struct qlnxr_pbl        *pbl_table;
1271         int                     i, rc = 0;
1272         qlnx_host_t             *ha;
1273
1274         ha = dev->ha;
1275
1276         QL_DPRINT12(ha, "enter\n");
1277
1278         pbl_table = kzalloc(sizeof(*pbl_table) * pbl_info->num_pbls, flags);
1279
1280         if (!pbl_table) {
1281                 QL_DPRINT12(ha, "pbl_table = NULL\n");
1282                 return NULL;
1283         }
1284
1285         for (i = 0; i < pbl_info->num_pbls; i++) {
1286                 va = qlnx_dma_alloc_coherent(&dev->ha->cdev, &pa, pbl_info->pbl_size);
1287                 if (!va) {
1288                         QL_DPRINT11(ha, "Failed to allocate pbl#%d\n", i);
1289                         rc = -ENOMEM;
1290                         goto err;
1291                 }
1292                 memset(va, 0, pbl_info->pbl_size);
1293                 pbl_table[i].va = va;
1294                 pbl_table[i].pa = pa;
1295         }
1296
1297         /* Two-layered PBLs: if we have more than one pbl, we need to
1298          * initialize the first one with physical pointers to all of the rest.
1299          */
1300         pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
1301         for (i = 0; i < pbl_info->num_pbls - 1; i++)
1302                 pbl_main_tbl[i] = pbl_table[i + 1].pa;
1303
1304         QL_DPRINT12(ha, "exit\n");
1305         return pbl_table;
1306
1307 err:
1308         qlnxr_free_pbl(dev, pbl_info, pbl_table);
1309
1310         QL_DPRINT12(ha, "exit with error\n");
1311         return NULL;
1312 }
1313
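     /*
      * Example of the sizing below: for num_pbes = 300,000 with
      * two_layer_capable set, a 4KB PBL page holds 512 PBEs and
      * 512 * 512 = 262,144 < 300,000, so pbl_size doubles to 8KB
      * (1024 * 1024 entries of capacity); DIV_ROUND_UP(300,000, 1024) = 293
      * leaf PBLs plus one layer-0 PBL gives num_pbls = 294.
      */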
1314 static int
1315 qlnxr_prepare_pbl_tbl(struct qlnxr_dev *dev,
1316         struct qlnxr_pbl_info *pbl_info,
1317         u32 num_pbes,
1318         int two_layer_capable)
1319 {
1320         u32             pbl_capacity;
1321         u32             pbl_size;
1322         u32             num_pbls;
1323         qlnx_host_t     *ha;
1324
1325         ha = dev->ha;
1326
1327         QL_DPRINT12(ha, "enter\n");
1328
1329         if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
1330                 if (num_pbes > MAX_PBES_TWO_LAYER) {
1331                         QL_DPRINT11(ha, "prepare pbl table: too many pages %d\n",
1332                                 num_pbes);
1333                         return -EINVAL;
1334                 }
1335
1336                 /* calculate required pbl page size */
1337                 pbl_size = MIN_FW_PBL_PAGE_SIZE;
1338                 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
1339                         NUM_PBES_ON_PAGE(pbl_size);
1340
1341                 while (pbl_capacity < num_pbes) {
1342                         pbl_size *= 2;
1343                         pbl_capacity = pbl_size / sizeof(u64);
1344                         pbl_capacity = pbl_capacity * pbl_capacity;
1345                 }
1346
1347                 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
1348                 num_pbls++; /* One for the layer0 ( points to the pbls) */
1349                 pbl_info->two_layered = true;
1350         } else {
1351                 /* One layered PBL */
1352                 num_pbls = 1;
1353                 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE, \
1354                                 roundup_pow_of_two((num_pbes * sizeof(u64))));
1355                 pbl_info->two_layered = false;
1356         }
1357
1358         pbl_info->num_pbls = num_pbls;
1359         pbl_info->pbl_size = pbl_size;
1360         pbl_info->num_pbes = num_pbes;
1361
1362         QL_DPRINT12(ha, "prepare pbl table: num_pbes=%d, num_pbls=%d pbl_size=%d\n",
1363                 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
1364
1365         return 0;
1366 }
1367
1368 #define upper_32_bits(x) ((uint32_t)((x) >> 32))
1369 #define lower_32_bits(x) ((uint32_t)(x))
1370
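     /*
      * Walk the umem scatter/gather list and write one little-endian 64-bit
      * page address (PBE) per hardware page into the PBL pages, advancing to
      * the next PBL page whenever the current one fills up.
      */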
1371 static void
1372 qlnxr_populate_pbls(struct qlnxr_dev *dev, struct ib_umem *umem,
1373         struct qlnxr_pbl *pbl, struct qlnxr_pbl_info *pbl_info)
1374 {
1375         struct regpair          *pbe;
1376         struct qlnxr_pbl        *pbl_tbl;
1377         struct scatterlist      *sg;
1378         int                     shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
1379         qlnx_host_t             *ha;
1380
1381 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
1382         int                     i;
1383         struct                  ib_umem_chunk *chunk = NULL;
1384 #else
1385         int                     entry;
1386 #endif
1387
1388         ha = dev->ha;
1389
1390         QL_DPRINT12(ha, "enter\n");
1391
1392         if (!pbl_info) {
1393                 QL_DPRINT11(ha, "PBL_INFO not initialized\n");
1394                 return;
1395         }
1396
1397         if (!pbl_info->num_pbes) {
1398                 QL_DPRINT11(ha, "pbl_info->num_pbes == 0\n");
1399                 return;
1400         }
1401
1402         /* If we have a two-layered pbl, the first pbl points to the rest
1403          * of the pbls and the first entry lies in the second pbl of the table.
1404          */
1405         if (pbl_info->two_layered)
1406                 pbl_tbl = &pbl[1];
1407         else
1408                 pbl_tbl = pbl;
1409
1410         pbe = (struct regpair *)pbl_tbl->va;
1411         if (!pbe) {
1412                 QL_DPRINT12(ha, "pbe is NULL\n");
1413                 return;
1414         }
1415
1416         pbe_cnt = 0;
1417
1418         shift = ilog2(umem->page_size);
1419
1420 #ifndef DEFINE_IB_UMEM_WITH_CHUNK
1421
1422         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
1423 #else
1424         list_for_each_entry(chunk, &umem->chunk_list, list) {
1425                 /* get all the dma regions from the chunk. */
1426                 for (i = 0; i < chunk->nmap; i++) {
1427                         sg = &chunk->page_list[i];
1428 #endif
1429                         pages = sg_dma_len(sg) >> shift;
1430                         for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
1431                                 /* store the page address in pbe */
1432                                 pbe->lo =
1433                                     cpu_to_le32(sg_dma_address(sg) +
1434                                                 (umem->page_size * pg_cnt));
1435                                 pbe->hi =
1436                                     cpu_to_le32(upper_32_bits
1437                                                 ((sg_dma_address(sg) +
1438                                                   umem->page_size * pg_cnt)));
1439
1440                                 QL_DPRINT12(ha,
1441                                         "Populate pbl table:"
1442                                         " pbe->addr=0x%x:0x%x "
1443                                         " pbe_cnt = %d total_num_pbes=%d"
1444                                         " pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
1445                                         total_num_pbes, pbe);
1446
1447                                 pbe_cnt ++;
1448                                 total_num_pbes ++;
1449                                 pbe++;
1450
1451                                 if (total_num_pbes == pbl_info->num_pbes)
1452                                         return;
1453
1454                                 /* If the current pbl is full of pbes,
1455                                  * move on to the next pbl.
1456                                  */
1457                                 if (pbe_cnt ==
1458                                         (pbl_info->pbl_size / sizeof(u64))) {
1459                                         pbl_tbl++;
1460                                         pbe = (struct regpair *)pbl_tbl->va;
1461                                         pbe_cnt = 0;
1462                                 }
1463                         }
1464 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
1465                 }
1466 #endif
1467         }
1468         QL_DPRINT12(ha, "exit\n");
1469         return;
1470 }
1471
1472 static void
1473 free_mr_info(struct qlnxr_dev *dev, struct mr_info *info)
1474 {
1475         struct qlnxr_pbl *pbl, *tmp;
1476         qlnx_host_t             *ha;
1477
1478         ha = dev->ha;
1479
1480         QL_DPRINT12(ha, "enter\n");
1481
1482         if (info->pbl_table)
1483                 list_add_tail(&info->pbl_table->list_entry,
1484                               &info->free_pbl_list);
1485
1486         if (!list_empty(&info->inuse_pbl_list))
1487                 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
1488
1489         list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
1490                 list_del(&pbl->list_entry);
1491                 qlnxr_free_pbl(dev, &info->pbl_info, pbl);
1492         }
1493         QL_DPRINT12(ha, "exit\n");
1494
1495         return;
1496 }
1497
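     /*
      * Size the PBL with qlnxr_prepare_pbl_tbl(), allocate the primary table
      * and, when possible, one spare that is parked on free_pbl_list; failure
      * to allocate the spare is tolerated (rc stays 0).
      */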
1498 static int
1499 qlnxr_init_mr_info(struct qlnxr_dev *dev, struct mr_info *info,
1500         size_t page_list_len, bool two_layered)
1501 {
1502         int                     rc;
1503         struct qlnxr_pbl        *tmp;
1504         qlnx_host_t             *ha;
1505
1506         ha = dev->ha;
1507
1508         QL_DPRINT12(ha, "enter\n");
1509
1510         INIT_LIST_HEAD(&info->free_pbl_list);
1511         INIT_LIST_HEAD(&info->inuse_pbl_list);
1512
1513         rc = qlnxr_prepare_pbl_tbl(dev, &info->pbl_info,
1514                                   page_list_len, two_layered);
1515         if (rc) {
1516                 QL_DPRINT11(ha, "qlnxr_prepare_pbl_tbl [%d]\n", rc);
1517                 goto done;
1518         }
1519
1520         info->pbl_table = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
1521
1522         if (!info->pbl_table) {
1523                 rc = -ENOMEM;
1524                 QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl returned NULL\n");
1525                 goto done;
1526         }
1527
1528         QL_DPRINT12(ha, "pbl_table_pa = %pa\n", &info->pbl_table->pa);
1529
1530         /* In the usual case we use 2 PBLs, so we add one to the free
1531          * list and allocate another one.
1532          */
1533         tmp = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
1534
1535         if (!tmp) {
1536                 QL_DPRINT11(ha, "Extra PBL is not allocated\n");
1537                 goto done; /* it's OK if second allocation fails, so rc = 0*/
1538         }
1539
1540         list_add_tail(&tmp->list_entry, &info->free_pbl_list);
1541
1542         QL_DPRINT12(ha, "extra pbl_table_pa = %pa\n", &tmp->pa);
1543
1544 done:
1545         if (rc)
1546                 free_mr_info(dev, info);
1547
1548         QL_DPRINT12(ha, "exit [%d]\n", rc);
1549
1550         return rc;
1551 }
1552
1553 struct ib_mr *
1554 #if __FreeBSD_version >= 1102000
1555 qlnxr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
1556         u64 usr_addr, int acc, struct ib_udata *udata)
1557 #else
1558 qlnxr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
1559         u64 usr_addr, int acc, struct ib_udata *udata, int mr_id)
1560 #endif /* #if __FreeBSD_version >= 1102000 */
1561 {
1562         int             rc = -ENOMEM;
1563         struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
1564         struct qlnxr_mr *mr;
1565         struct qlnxr_pd *pd;
1566         qlnx_host_t     *ha;
1567
1568         ha = dev->ha;
1569
1570         QL_DPRINT12(ha, "enter\n");
1571
1572         pd = get_qlnxr_pd(ibpd);
1573
1574         QL_DPRINT12(ha, "qedr_register user mr pd = %d"
1575                 " start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
1576                 pd->pd_id, start, len, usr_addr, acc);
1577
1578         if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
1579                 QL_DPRINT11(ha,
1580                         "(acc & IB_ACCESS_REMOTE_WRITE &&"
1581                         " !(acc & IB_ACCESS_LOCAL_WRITE))\n");
1582                 return ERR_PTR(-EINVAL);
1583         }
1584
1585         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1586         if (!mr) {
1587                 QL_DPRINT11(ha, "kzalloc(mr) failed\n");
1588                 return ERR_PTR(rc);
1589         }
1590
1591         mr->type = QLNXR_MR_USER;
1592
1593         mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
1594         if (IS_ERR(mr->umem)) {
1595                 rc = -EFAULT;
1596                 QL_DPRINT11(ha, "ib_umem_get failed [%p]\n", mr->umem);
1597                 goto err0;
1598         }
1599
1600         rc = qlnxr_init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
1601         if (rc) {
1602                 QL_DPRINT11(ha,
1603                         "qlnxr_init_mr_info failed [%d]\n", rc);
1604                 goto err1;
1605         }
1606
1607         qlnxr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
1608                            &mr->info.pbl_info);
1609
1610         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
1611
1612         if (rc) {
1613                 QL_DPRINT11(ha, "roce alloc tid returned an error %d\n", rc);
1614                 goto err1;
1615         }
1616
1617         /* index only, 18 bit long, lkey = itid << 8 | key */
1618         mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
1619         mr->hw_mr.key = 0;
1620         mr->hw_mr.pd = pd->pd_id;
1621         mr->hw_mr.local_read = 1;
1622         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
1623         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
1624         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1625         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
1626         mr->hw_mr.mw_bind = false; /* TBD MW BIND */
1627         mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
1628         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
1629         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
1630         mr->hw_mr.page_size_log = ilog2(mr->umem->page_size); /* for the MR pages */
1631
1632 #if __FreeBSD_version >= 1102000
1633         mr->hw_mr.fbo = ib_umem_offset(mr->umem);
1634 #else
1635         mr->hw_mr.fbo = mr->umem->offset;
1636 #endif
1637         mr->hw_mr.length = len;
1638         mr->hw_mr.vaddr = usr_addr;
1639         mr->hw_mr.zbva = false; /* TBD figure when this should be true */
1640         mr->hw_mr.phy_mr = false; /* Fast MR - True, Regular Register False */
1641         mr->hw_mr.dma_mr = false;
1642
1643         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
1644         if (rc) {
1645                 QL_DPRINT11(ha, "roce register tid returned an error %d\n", rc);
1646                 goto err2;
1647         }
1648
1649         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1650         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
1651                 mr->hw_mr.remote_atomic)
1652                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1653
1654         QL_DPRINT12(ha, "register user mr lkey: %x\n", mr->ibmr.lkey);
1655
1656         return (&mr->ibmr);
1657
1658 err2:
1659         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
1660 err1:
1661         qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
1662 err0:
1663         kfree(mr);
1664
1665         QL_DPRINT12(ha, "exit [%d]\n", rc);
1666         return (ERR_PTR(rc));
1667 }
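/*
 * Illustration (not in the original source): the lkey is composed as
 * (itid << 8) | key, and key is forced to 0 above, so e.g. a TID of 0x3
 * yields lkey 0x300.  The rkey is only filled in when at least one remote
 * access flag (read, write or atomic) was requested.
 */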
1668
1669 int
1670 qlnxr_dereg_mr(struct ib_mr *ib_mr)
1671 {
1672         struct qlnxr_mr *mr = get_qlnxr_mr(ib_mr);
1673         struct qlnxr_dev *dev = get_qlnxr_dev((ib_mr->device));
1674         int             rc = 0;
1675         qlnx_host_t     *ha;
1676
1677         ha = dev->ha;
1678
1679         QL_DPRINT12(ha, "enter\n");
1680
1681         if ((mr->type != QLNXR_MR_DMA) && (mr->type != QLNXR_MR_FRMR))
1682                 qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
1683
1684         /* it could be user registered memory. */
1685         if (mr->umem)
1686                 ib_umem_release(mr->umem);
1687
1688         kfree(mr->pages);
1689
1690         kfree(mr);
1691
1692         QL_DPRINT12(ha, "exit\n");
1693         return rc;
1694 }
1695
1696 static int
1697 qlnxr_copy_cq_uresp(struct qlnxr_dev *dev,
1698         struct qlnxr_cq *cq, struct ib_udata *udata)
1699 {
1700         struct qlnxr_create_cq_uresp    uresp;
1701         int                             rc;
1702         qlnx_host_t                     *ha;
1703
1704         ha = dev->ha;
1705
1706         QL_DPRINT12(ha, "enter\n");
1707
1708         memset(&uresp, 0, sizeof(uresp));
1709
1710         uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
1711         uresp.icid = cq->icid;
1712
1713         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1714
1715         if (rc) {
1716                 QL_DPRINT12(ha, "ib_copy_to_udata error cqid=0x%x[%d]\n",
1717                         cq->icid, rc);
1718         }
1719
1720         QL_DPRINT12(ha, "exit [%d]\n", rc);
1721         return rc;
1722 }
1723
1724 static void
1725 consume_cqe(struct qlnxr_cq *cq)
1726 {
1727
1728         if (cq->latest_cqe == cq->toggle_cqe)
1729                 cq->pbl_toggle ^= RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK;
1730
1731         cq->latest_cqe = ecore_chain_consume(&cq->pbl);
1732 }
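/*
 * Note (explanatory, not in the original source): pbl_toggle tracks which
 * "lap" of the CQ ring the driver is on.  Whenever the CQE just consumed
 * was the last element of the chain, the expected toggle value is flipped,
 * presumably so that CQEs left over from the previous lap are not mistaken
 * for new completions.
 */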
1733
1734 static inline int
1735 qlnxr_align_cq_entries(int entries)
1736 {
1737         u64 size, aligned_size;
1738
1739         /* We allocate an extra entry that we don't report to the FW.
1740          * Why?
1741          * The CQE size is 32 bytes but the FW writes in chunks of 64 bytes
1742          * (for performance purposes). Allocating an extra entry and telling
1743          * the FW we have less prevents overwriting the first entry in case of
1744          * a wrap i.e. when the FW writes the last entry and the application
1745          * hasn't read the first one.
1746          */
1747         size = (entries + 1) * QLNXR_CQE_SIZE;
1748
1749         /* We align to PAGE_SIZE.
1750          * Why?
1751          * Since the CQ is going to be mapped and the mapping is anyhow in whole
1752          * kernel pages we benefit from the possibly extra CQEs.
1753          */
1754         aligned_size = ALIGN(size, PAGE_SIZE);
1755
1756         /* note: for CQs created in user space the result of this function
1757          * should match the size mapped in user space
1758          */
1759         return (aligned_size / QLNXR_CQE_SIZE);
1760 }
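/*
 * Worked example (illustrative, assuming QLNXR_CQE_SIZE is the 32-byte CQE
 * mentioned above and PAGE_SIZE is 4 KiB): a request for 100 entries gives
 * (100 + 1) * 32 = 3232 bytes, which rounds up to 4096, so the function
 * returns 128 usable CQEs.
 */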
1761
1762 static inline int
1763 qlnxr_init_user_queue(struct ib_ucontext *ib_ctx, struct qlnxr_dev *dev,
1764         struct qlnxr_userq *q, u64 buf_addr, size_t buf_len,
1765         int access, int dmasync, int alloc_and_init)
1766 {
1767         int             page_cnt;
1768         int             rc;
1769         qlnx_host_t     *ha;
1770
1771         ha = dev->ha;
1772
1773         QL_DPRINT12(ha, "enter\n");
1774
1775         q->buf_addr = buf_addr;
1776         q->buf_len = buf_len;
1777
1778         QL_DPRINT12(ha, "buf_addr : %llx, buf_len : %x, access : %x"
1779               " dmasync : %x\n", q->buf_addr, q->buf_len,
1780                 access, dmasync);       
1781
1782         q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
1783
1784         if (IS_ERR(q->umem)) {
1785                 QL_DPRINT11(ha, "ib_umem_get failed [%lx]\n", PTR_ERR(q->umem));
1786                 return PTR_ERR(q->umem);
1787         }
1788
1789         page_cnt = ib_umem_page_count(q->umem);
1790         rc = qlnxr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt,
1791                                   0 /* SQ and RQ don't support dual layer pbl.
1792                                      * CQ may, but this is not yet implemented.
1793                                      */);
1794         if (rc) {
1795                 QL_DPRINT11(ha, "qlnxr_prepare_pbl_tbl failed [%d]\n", rc);
1796                 goto err;
1797         }
1798
1799         if (alloc_and_init) {
1800                 q->pbl_tbl = qlnxr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
1801
1802                 if (!q->pbl_tbl) {
1803                         QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl failed\n");
1804                         rc = -ENOMEM;
1805                         goto err;
1806                 }
1807
1808                 qlnxr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
1809         } else {
1810                 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
1811
1812                 if (!q->pbl_tbl) {
1813                         QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl failed\n");
1814                         rc = -ENOMEM;
1815                         goto err;
1816                 }
1817         }
1818
1819         QL_DPRINT12(ha, "exit\n");
1820         return 0;
1821
1822 err:
1823         ib_umem_release(q->umem);
1824         q->umem = NULL;
1825
1826         QL_DPRINT12(ha, "exit [%d]\n", rc);
1827         return rc;
1828 }
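/*
 * Note (explanatory, not in the original source): when alloc_and_init is
 * set, the PBL table is allocated and populated here from the pinned umem.
 * Otherwise (the iWARP user-QP path) only a placeholder qlnxr_pbl is
 * allocated; the actual PBL memory is supplied by ecore at QP-creation time
 * and filled in later by qlnxr_iwarp_populate_user_qp().
 */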
1829
1830 #if __FreeBSD_version >= 1102000
1831
1832 struct ib_cq *
1833 qlnxr_create_cq(struct ib_device *ibdev,
1834         const struct ib_cq_init_attr *attr,
1835         struct ib_ucontext *ib_ctx,
1836         struct ib_udata *udata)
1837
1838 #else 
1839
1840 #if __FreeBSD_version >= 1100000
1841
1842 struct ib_cq *
1843 qlnxr_create_cq(struct ib_device *ibdev,
1844         struct ib_cq_init_attr *attr,
1845         struct ib_ucontext *ib_ctx,
1846         struct ib_udata *udata)
1847
1848 #else
1849
1850 struct ib_cq *
1851 qlnxr_create_cq(struct ib_device *ibdev,
1852         int entries,
1853         int vector,
1854         struct ib_ucontext *ib_ctx,
1855         struct ib_udata *udata)
1856 #endif /* #if __FreeBSD_version >= 1100000 */
1857
1858 #endif /* #if __FreeBSD_version >= 1102000 */
1859 {
1860         struct qlnxr_ucontext                   *ctx;
1861         struct ecore_rdma_destroy_cq_out_params destroy_oparams;
1862         struct ecore_rdma_destroy_cq_in_params  destroy_iparams;
1863         struct qlnxr_dev                        *dev;
1864         struct ecore_rdma_create_cq_in_params   params;
1865         struct qlnxr_create_cq_ureq             ureq;
1866
1867 #if __FreeBSD_version >= 1100000
1868         int                                     vector = attr->comp_vector;
1869         int                                     entries = attr->cqe;
1870 #endif
1871         struct qlnxr_cq                         *cq;
1872         int                                     chain_entries, rc, page_cnt;
1873         u64                                     pbl_ptr;
1874         u16                                     icid;
1875         qlnx_host_t                             *ha;
1876
1877         dev = get_qlnxr_dev(ibdev);
1878         ha = dev->ha;
1879
1880         QL_DPRINT12(ha, "called from %s. entries = %d, "
1881                 "vector = %d\n",
1882                 (udata ? "User Lib" : "Kernel"), entries, vector);
1883
1884         memset(&params, 0, sizeof(struct ecore_rdma_create_cq_in_params));
1885         memset(&destroy_iparams, 0, sizeof(struct ecore_rdma_destroy_cq_in_params));
1886         memset(&destroy_oparams, 0, sizeof(struct ecore_rdma_destroy_cq_out_params));
1887
1888         if (entries > QLNXR_MAX_CQES) {
1889                 QL_DPRINT11(ha,
1890                         "the number of entries %d is too high. "
1891                         "Must be equal or below %d.\n",
1892                         entries, QLNXR_MAX_CQES);
1893                 return ERR_PTR(-EINVAL);
1894         }
1895         chain_entries = qlnxr_align_cq_entries(entries);
1896         chain_entries = min_t(int, chain_entries, QLNXR_MAX_CQES);
1897
1898         cq = qlnx_zalloc((sizeof(struct qlnxr_cq)));
1899
1900         if (!cq)
1901                 return ERR_PTR(-ENOMEM);
1902
1903         if (udata) {
1904                 memset(&ureq, 0, sizeof(ureq));
1905
1906                 if (ib_copy_from_udata(&ureq, udata,
1907                         min(sizeof(ureq), udata->inlen))) {
1908                         QL_DPRINT11(ha, "ib_copy_from_udata failed\n");
1909                         goto err0;
1910                 }
1911
1912                 if (!ureq.len) {
1913                         QL_DPRINT11(ha, "ureq.len == 0\n");
1914                         goto err0;
1915                 }
1916
1917                 cq->cq_type = QLNXR_CQ_TYPE_USER;
1918
1919                 qlnxr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr, ureq.len,
1920                                      IB_ACCESS_LOCAL_WRITE, 1, 1);
1921
1922                 pbl_ptr = cq->q.pbl_tbl->pa;
1923                 page_cnt = cq->q.pbl_info.num_pbes;
1924                 cq->ibcq.cqe = chain_entries;
1925         } else {
1926                 cq->cq_type = QLNXR_CQ_TYPE_KERNEL;
1927
1928                 rc = ecore_chain_alloc(&dev->ha->cdev,
1929                            ECORE_CHAIN_USE_TO_CONSUME,
1930                            ECORE_CHAIN_MODE_PBL,
1931                            ECORE_CHAIN_CNT_TYPE_U32,
1932                            chain_entries,
1933                            sizeof(union roce_cqe),
1934                            &cq->pbl, NULL);
1935
1936                 if (rc)
1937                         goto err1;
1938
1939                 page_cnt = ecore_chain_get_page_cnt(&cq->pbl);
1940                 pbl_ptr = ecore_chain_get_pbl_phys(&cq->pbl);
1941                 cq->ibcq.cqe = cq->pbl.capacity;
1942         }
1943
1944         params.cq_handle_hi = upper_32_bits((uintptr_t)cq);
1945         params.cq_handle_lo = lower_32_bits((uintptr_t)cq);
1946         params.cnq_id = vector;
1947         params.cq_size = chain_entries - 1;
1948         params.pbl_num_pages = page_cnt;
1949         params.pbl_ptr = pbl_ptr;
1950         params.pbl_two_level = 0;
1951
1952         if (ib_ctx != NULL) {
1953                 ctx = get_qlnxr_ucontext(ib_ctx);
1954                 params.dpi = ctx->dpi;
1955         } else {
1956                 params.dpi = dev->dpi;
1957         }
1958
1959         rc = ecore_rdma_create_cq(dev->rdma_ctx, &params, &icid);
1960         if (rc)
1961                 goto err2;
1962
1963         cq->icid = icid;
1964         cq->sig = QLNXR_CQ_MAGIC_NUMBER;
1965         spin_lock_init(&cq->cq_lock);
1966
1967         if (ib_ctx) {
1968                 rc = qlnxr_copy_cq_uresp(dev, cq, udata);
1969                 if (rc)
1970                         goto err3;
1971         } else {
1972                 /* Generate doorbell address.
1973                  * Configure bits 3-9 with DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT.
1974                  * TODO: consider moving to device scope as it is a function of
1975                  *       the device.
1976                  * TODO: add ifdef if plan to support 16 bit.
1977                  */
1978                 cq->db_addr = dev->db_addr +
1979                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
1980                 cq->db.data.icid = cq->icid;
1981                 cq->db.data.params = DB_AGG_CMD_SET <<
1982                                      RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1983
1984                 /* point to the very last element, passing it we will toggle */
1985                 cq->toggle_cqe = ecore_chain_get_last_elem(&cq->pbl);
1986                 cq->pbl_toggle = RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK;
1987
1988                 /* must be different from pbl_toggle */
1989                 cq->latest_cqe = NULL;
1990                 consume_cqe(cq);
1991                 cq->cq_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
1992         }
1993
1994         QL_DPRINT12(ha, "exit icid = 0x%0x, addr = %p,"
1995                 " number of entries = 0x%x\n",
1996                 cq->icid, cq, params.cq_size);
1997         QL_DPRINT12(ha,"cq_addr = %p\n", cq);
1998         return &cq->ibcq;
1999
2000 err3:
2001         destroy_iparams.icid = cq->icid;
2002         ecore_rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams, &destroy_oparams);
2003 err2:
2004         if (udata)
2005                 qlnxr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
2006         else
2007                 ecore_chain_free(&dev->ha->cdev, &cq->pbl);
2008 err1:
2009         if (udata)
2010                 ib_umem_release(cq->q.umem);
2011 err0:
2012         kfree(cq);
2013
2014         QL_DPRINT12(ha, "exit error\n");
2015
2016         return ERR_PTR(-EINVAL);
2017 }
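/*
 * Note (explanatory, not in the original source): params.cq_size is set to
 * chain_entries - 1 because qlnxr_align_cq_entries() allocates one CQE more
 * than is reported to the FW, protecting the first entry on a wrap.  Kernel
 * CQs get their consumer doorbell and toggle state primed above, while
 * user-space CQs receive their doorbell offset via qlnxr_copy_cq_uresp().
 */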
2018
2019 int qlnxr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
2020 {
2021         int                     status = 0;
2022         struct qlnxr_dev        *dev = get_qlnxr_dev((ibcq->device));
2023         qlnx_host_t             *ha;
2024
2025         ha = dev->ha;
2026
2027         QL_DPRINT12(ha, "enter/exit\n");
2028
2029         return status;
2030 }
2031
2032 int
2033 qlnxr_destroy_cq(struct ib_cq *ibcq)
2034 {
2035         struct qlnxr_dev                        *dev = get_qlnxr_dev((ibcq->device));
2036         struct ecore_rdma_destroy_cq_out_params oparams;
2037         struct ecore_rdma_destroy_cq_in_params  iparams;
2038         struct qlnxr_cq                         *cq = get_qlnxr_cq(ibcq);
2039         int                                     rc = 0;
2040         qlnx_host_t                             *ha;
2041
2042         ha = dev->ha;
2043
2044         QL_DPRINT12(ha, "enter cq_id = %d\n", cq->icid);
2045
2046         cq->destroyed = 1;
2047
2048         /* TODO: Synchronize the irq of the CNQ this CQ belongs to, to make
2049          * sure that all completions with notification have been dealt with.
2050          * The rest of the completions are not interesting.
2051          */
2052
2053         /* GSIs CQs are handled by driver, so they don't exist in the FW */
2054
2055         if (cq->cq_type != QLNXR_CQ_TYPE_GSI) {
2056                 iparams.icid = cq->icid;
2057
2058                 rc = ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
2059
2060                 if (rc) {
2061                         QL_DPRINT12(ha, "ecore_rdma_destroy_cq failed cq_id = %d\n",
2062                                 cq->icid);
2063                         return rc;
2064                 }
2065
2066                 QL_DPRINT12(ha, "free cq->pbl cq_id = %d\n", cq->icid);
2067                 ecore_chain_free(&dev->ha->cdev, &cq->pbl);
2068         }
2069
2070         if (ibcq->uobject && ibcq->uobject->context) {
2071                 qlnxr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
2072                 ib_umem_release(cq->q.umem);
2073         }
2074
2075         cq->sig = ~cq->sig;
2076
2077         kfree(cq);
2078
2079         QL_DPRINT12(ha, "exit cq_id = %d\n", cq->icid);
2080
2081         return rc;
2082 }
2083
2084 static int
2085 qlnxr_check_qp_attrs(struct ib_pd *ibpd,
2086         struct qlnxr_dev *dev,
2087         struct ib_qp_init_attr *attrs,
2088         struct ib_udata *udata)
2089 {
2090         struct ecore_rdma_device        *qattr;
2091         qlnx_host_t                     *ha;
2092
2093         qattr = ecore_rdma_query_device(dev->rdma_ctx);
2094         ha = dev->ha;
2095
2096         QL_DPRINT12(ha, "enter\n");
2097
2098         QL_DPRINT12(ha, "attrs->sq_sig_type = %d\n", attrs->sq_sig_type);
2099         QL_DPRINT12(ha, "attrs->qp_type = %d\n", attrs->qp_type);
2100         QL_DPRINT12(ha, "attrs->create_flags = %d\n", attrs->create_flags);
2101
2102 #if __FreeBSD_version < 1102000
2103         QL_DPRINT12(ha, "attrs->qpg_type = %d\n", attrs->qpg_type);
2104 #endif
2105
2106         QL_DPRINT12(ha, "attrs->port_num = %d\n", attrs->port_num);
2107         QL_DPRINT12(ha, "attrs->cap.max_send_wr = 0x%x\n", attrs->cap.max_send_wr);
2108         QL_DPRINT12(ha, "attrs->cap.max_recv_wr = 0x%x\n", attrs->cap.max_recv_wr);
2109         QL_DPRINT12(ha, "attrs->cap.max_send_sge = 0x%x\n", attrs->cap.max_send_sge);
2110         QL_DPRINT12(ha, "attrs->cap.max_recv_sge = 0x%x\n", attrs->cap.max_recv_sge);
2111         QL_DPRINT12(ha, "attrs->cap.max_inline_data = 0x%x\n",
2112                 attrs->cap.max_inline_data);
2113
2114 #if __FreeBSD_version < 1102000
2115         QL_DPRINT12(ha, "attrs->cap.qpg_tss_mask_sz = 0x%x\n",
2116                 attrs->cap.qpg_tss_mask_sz);
2117 #endif
2118
2119         QL_DPRINT12(ha, "\n\nqattr->vendor_id = 0x%x\n", qattr->vendor_id);
2120         QL_DPRINT12(ha, "qattr->vendor_part_id = 0x%x\n", qattr->vendor_part_id);
2121         QL_DPRINT12(ha, "qattr->hw_ver = 0x%x\n", qattr->hw_ver);
2122         QL_DPRINT12(ha, "qattr->fw_ver = %p\n", (void *)qattr->fw_ver);
2123         QL_DPRINT12(ha, "qattr->node_guid = %p\n", (void *)qattr->node_guid);
2124         QL_DPRINT12(ha, "qattr->sys_image_guid = %p\n",
2125                 (void *)qattr->sys_image_guid);
2126         QL_DPRINT12(ha, "qattr->max_cnq = 0x%x\n", qattr->max_cnq);
2127         QL_DPRINT12(ha, "qattr->max_sge = 0x%x\n", qattr->max_sge);
2128         QL_DPRINT12(ha, "qattr->max_srq_sge = 0x%x\n", qattr->max_srq_sge);
2129         QL_DPRINT12(ha, "qattr->max_inline = 0x%x\n", qattr->max_inline);
2130         QL_DPRINT12(ha, "qattr->max_wqe = 0x%x\n", qattr->max_wqe);
2131         QL_DPRINT12(ha, "qattr->max_srq_wqe = 0x%x\n", qattr->max_srq_wqe);
2132         QL_DPRINT12(ha, "qattr->max_qp_resp_rd_atomic_resc = 0x%x\n",
2133                 qattr->max_qp_resp_rd_atomic_resc);
2134         QL_DPRINT12(ha, "qattr->max_qp_req_rd_atomic_resc = 0x%x\n",
2135                 qattr->max_qp_req_rd_atomic_resc);
2136         QL_DPRINT12(ha, "qattr->max_dev_resp_rd_atomic_resc = 0x%x\n",
2137                 qattr->max_dev_resp_rd_atomic_resc);
2138         QL_DPRINT12(ha, "qattr->max_cq = 0x%x\n", qattr->max_cq);
2139         QL_DPRINT12(ha, "qattr->max_qp = 0x%x\n", qattr->max_qp);
2140         QL_DPRINT12(ha, "qattr->max_srq = 0x%x\n", qattr->max_srq);
2141         QL_DPRINT12(ha, "qattr->max_mr = 0x%x\n", qattr->max_mr);
2142         QL_DPRINT12(ha, "qattr->max_mr_size = %p\n", (void *)qattr->max_mr_size);
2143         QL_DPRINT12(ha, "qattr->max_cqe = 0x%x\n", qattr->max_cqe);
2144         QL_DPRINT12(ha, "qattr->max_mw = 0x%x\n", qattr->max_mw);
2145         QL_DPRINT12(ha, "qattr->max_fmr = 0x%x\n", qattr->max_fmr);
2146         QL_DPRINT12(ha, "qattr->max_mr_mw_fmr_pbl = 0x%x\n",
2147                 qattr->max_mr_mw_fmr_pbl);
2148         QL_DPRINT12(ha, "qattr->max_mr_mw_fmr_size = %p\n",
2149                 (void *)qattr->max_mr_mw_fmr_size);
2150         QL_DPRINT12(ha, "qattr->max_pd = 0x%x\n", qattr->max_pd);
2151         QL_DPRINT12(ha, "qattr->max_ah = 0x%x\n", qattr->max_ah);
2152         QL_DPRINT12(ha, "qattr->max_pkey = 0x%x\n", qattr->max_pkey);
2153         QL_DPRINT12(ha, "qattr->max_srq_wr = 0x%x\n", qattr->max_srq_wr);
2154         QL_DPRINT12(ha, "qattr->max_stats_queues = 0x%x\n",
2155                 qattr->max_stats_queues);
2156         //QL_DPRINT12(ha, "qattr->dev_caps = 0x%x\n", qattr->dev_caps);
2157         QL_DPRINT12(ha, "qattr->page_size_caps = %p\n",
2158                 (void *)qattr->page_size_caps);
2159         QL_DPRINT12(ha, "qattr->dev_ack_delay = 0x%x\n", qattr->dev_ack_delay);
2160         QL_DPRINT12(ha, "qattr->reserved_lkey = 0x%x\n", qattr->reserved_lkey);
2161         QL_DPRINT12(ha, "qattr->bad_pkey_counter = 0x%x\n",
2162                 qattr->bad_pkey_counter);
2163
2164         if ((attrs->qp_type == IB_QPT_GSI) && udata) {
2165                 QL_DPRINT12(ha, "unexpected udata when creating GSI QP\n");
2166                 return -EINVAL;
2167         }
2168
2169         if (udata && !(ibpd->uobject && ibpd->uobject->context)) {
2170                 QL_DPRINT12(ha, "called from user without context\n");
2171                 return -EINVAL;
2172         }
2173
2174         /* QP0... attrs->qp_type == IB_QPT_GSI */
2175         if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
2176                 QL_DPRINT12(ha, "unsupported qp type=0x%x requested\n", 
2177                            attrs->qp_type);
2178                 return -EINVAL;
2179         }
2180         if (attrs->qp_type == IB_QPT_GSI && attrs->srq) {
2181                 QL_DPRINT12(ha, "cannot create GSI qp with SRQ\n");
2182                 return -EINVAL;
2183         }
2184         /* Skip the check for QP1 to support CM size of 128 */
2185         if (attrs->cap.max_send_wr > qattr->max_wqe) {
2186                 QL_DPRINT12(ha, "cannot create a SQ with %d elements "
2187                         " (max_send_wr=0x%x)\n",
2188                         attrs->cap.max_send_wr, qattr->max_wqe);
2189                 return -EINVAL;
2190         }
2191         if (!attrs->srq && (attrs->cap.max_recv_wr > qattr->max_wqe)) {
2192                 QL_DPRINT12(ha, "cannot create a RQ with %d elements"
2193                         " (max_recv_wr=0x%x)\n",
2194                         attrs->cap.max_recv_wr, qattr->max_wqe);
2195                 return -EINVAL;
2196         }
2197         if (attrs->cap.max_inline_data > qattr->max_inline) {
2198                 QL_DPRINT12(ha,
2199                         "unsupported inline data size=0x%x "
2200                         "requested (max_inline=0x%x)\n",
2201                         attrs->cap.max_inline_data, qattr->max_inline);
2202                 return -EINVAL;
2203         }
2204         if (attrs->cap.max_send_sge > qattr->max_sge) {
2205                 QL_DPRINT12(ha,
2206                         "unsupported send_sge=0x%x "
2207                         "requested (max_send_sge=0x%x)\n",
2208                         attrs->cap.max_send_sge, qattr->max_sge);
2209                 return -EINVAL;
2210         }
2211         if (attrs->cap.max_recv_sge > qattr->max_sge) {
2212                 QL_DPRINT12(ha,
2213                         "unsupported recv_sge=0x%x requested "
2214                         " (max_recv_sge=0x%x)\n",
2215                         attrs->cap.max_recv_sge, qattr->max_sge);
2216                 return -EINVAL;
2217         }
2218         /* unprivileged user space cannot create special QP */
2219         if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
2220                 QL_DPRINT12(ha,
2221                         "userspace can't create special QPs of type=0x%x\n",
2222                         attrs->qp_type);
2223                 return -EINVAL;
2224         }
2225         /* allow creating only one GSI type of QP */
2226         if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
2227                 QL_DPRINT12(ha,
2228                         "create qp: GSI special QPs already created.\n");
2229                 return -EINVAL;
2230         }
2231
2232         /* verify consumer QPs are not trying to use GSI QP's CQ */
2233         if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
2234                 struct qlnxr_cq *send_cq = get_qlnxr_cq(attrs->send_cq);
2235                 struct qlnxr_cq *recv_cq = get_qlnxr_cq(attrs->recv_cq);
2236
2237                 if ((send_cq->cq_type == QLNXR_CQ_TYPE_GSI) ||
2238                     (recv_cq->cq_type == QLNXR_CQ_TYPE_GSI)) {
2239                         QL_DPRINT11(ha, "consumer QP cannot use GSI CQs.\n");
2240                         return -EINVAL;
2241                 }
2242         }
2243         QL_DPRINT12(ha, "exit\n");
2244         return 0;
2245 }
2246
2247 static int
2248 qlnxr_copy_srq_uresp(struct qlnxr_dev *dev,
2249         struct qlnxr_srq *srq,
2250         struct ib_udata *udata)
2251 {
2252         struct qlnxr_create_srq_uresp   uresp;
2253         qlnx_host_t                     *ha;
2254         int                             rc;
2255
2256         ha = dev->ha;
2257
2258         QL_DPRINT12(ha, "enter\n");
2259
2260         memset(&uresp, 0, sizeof(uresp));
2261
2262         uresp.srq_id = srq->srq_id;
2263
2264         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2265
2266         QL_DPRINT12(ha, "exit [%d]\n", rc);
2267         return rc;
2268 }
2269
2270 static void
2271 qlnxr_copy_rq_uresp(struct qlnxr_dev *dev,
2272         struct qlnxr_create_qp_uresp *uresp,
2273         struct qlnxr_qp *qp)
2274 {
2275         qlnx_host_t     *ha;
2276
2277         ha = dev->ha;
2278
2279         /* Return if QP is associated with SRQ instead of RQ */
2280         QL_DPRINT12(ha, "enter qp->srq = %p\n", qp->srq);
2281
2282         if (qp->srq)
2283                 return;
2284
2285         /* iWARP requires two doorbells per RQ. */
2286         if (QLNX_IS_IWARP(dev)) {
2287                 uresp->rq_db_offset =
2288                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2289                 uresp->rq_db2_offset =
2290                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2291
2292                 QL_DPRINT12(ha, "uresp->rq_db_offset = 0x%x "
2293                         "uresp->rq_db2_offset = 0x%x\n",
2294                         uresp->rq_db_offset, uresp->rq_db2_offset);
2295         } else {
2296                 uresp->rq_db_offset =
2297                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
2298         }
2299         uresp->rq_icid = qp->icid;
2300
2301         QL_DPRINT12(ha, "exit\n");
2302         return;
2303 }
2304
2305 static void
2306 qlnxr_copy_sq_uresp(struct qlnxr_dev *dev,
2307         struct qlnxr_create_qp_uresp *uresp,
2308         struct qlnxr_qp *qp)
2309 {
2310         qlnx_host_t     *ha;
2311
2312         ha = dev->ha;
2313
2314         QL_DPRINT12(ha, "enter\n");
2315
2316         uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2317
2318         /* iWARP uses the same icid for the rq and the sq */
2319         if (QLNX_IS_IWARP(dev)) {
2320                 uresp->sq_icid = qp->icid;
2321                 QL_DPRINT12(ha, "uresp->sq_icid = 0x%x\n", uresp->sq_icid);
2322         } else
2323                 uresp->sq_icid = qp->icid + 1;
2324
2325         QL_DPRINT12(ha, "exit\n");
2326         return;
2327 }
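/*
 * Note (explanatory, not in the original source): the icids reported to
 * user space mirror the kernel doorbell setup further below: RoCE uses
 * qp->icid for the RQ and qp->icid + 1 for the SQ, while iWARP uses the
 * same icid for both queues and additionally exposes a second RQ doorbell
 * offset (DQ_PWM_OFFSET_TCM_FLAGS).
 */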
2328
2329 static int
2330 qlnxr_copy_qp_uresp(struct qlnxr_dev *dev,
2331         struct qlnxr_qp *qp,
2332         struct ib_udata *udata)
2333 {
2334         int                             rc;
2335         struct qlnxr_create_qp_uresp    uresp;
2336         qlnx_host_t                     *ha;
2337
2338         ha = dev->ha;
2339
2340         QL_DPRINT12(ha, "enter qp->icid =0x%x\n", qp->icid);
2341
2342         memset(&uresp, 0, sizeof(uresp));
2343         qlnxr_copy_sq_uresp(dev, &uresp, qp);
2344         qlnxr_copy_rq_uresp(dev, &uresp, qp);
2345
2346         uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
2347         uresp.qp_id = qp->qp_id;
2348
2349         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2350
2351         QL_DPRINT12(ha, "exit [%d]\n", rc);
2352         return rc;
2353 }
2354
2355 static void
2356 qlnxr_set_common_qp_params(struct qlnxr_dev *dev,
2357         struct qlnxr_qp *qp,
2358         struct qlnxr_pd *pd,
2359         struct ib_qp_init_attr *attrs)
2360 {
2361         qlnx_host_t                     *ha;
2362
2363         ha = dev->ha;
2364
2365         QL_DPRINT12(ha, "enter\n");
2366
2367         spin_lock_init(&qp->q_lock);
2368
2369         atomic_set(&qp->refcnt, 1);
2370         qp->pd = pd;
2371         qp->sig = QLNXR_QP_MAGIC_NUMBER;
2372         qp->qp_type = attrs->qp_type;
2373         qp->max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2374         qp->sq.max_sges = attrs->cap.max_send_sge;
2375         qp->state = ECORE_ROCE_QP_STATE_RESET;
2376         qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
2377         qp->sq_cq = get_qlnxr_cq(attrs->send_cq);
2378         qp->rq_cq = get_qlnxr_cq(attrs->recv_cq);
2379         qp->dev = dev;
2380
2381         if (!attrs->srq) {
2382                 /* QP is associated with RQ instead of SRQ */
2383                 qp->rq.max_sges = attrs->cap.max_recv_sge;
2384                 QL_DPRINT12(ha, "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
2385                         qp->rq.max_sges, qp->rq_cq->icid);
2386         } else {
2387                 qp->srq = get_qlnxr_srq(attrs->srq);
2388         }
2389
2390         QL_DPRINT12(ha,
2391                 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d,"
2392                 " state = %d, signaled = %d, use_srq=%d\n",
2393                 pd->pd_id, qp->qp_type, qp->max_inline_data,
2394                 qp->state, qp->signaled, ((attrs->srq) ? 1 : 0));
2395         QL_DPRINT12(ha, "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
2396                 qp->sq.max_sges, qp->sq_cq->icid);
2397         return;
2398 }
2399
2400 static int
2401 qlnxr_check_srq_params(struct ib_pd *ibpd,
2402         struct qlnxr_dev *dev,
2403         struct ib_srq_init_attr *attrs)
2404 {
2405         struct ecore_rdma_device *qattr;
2406         qlnx_host_t             *ha;
2407
2408         ha = dev->ha;
2409         qattr = ecore_rdma_query_device(dev->rdma_ctx);
2410
2411         QL_DPRINT12(ha, "enter\n");
2412
2413         if (attrs->attr.max_wr > qattr->max_srq_wqe) {
2414                 QL_DPRINT12(ha, "unsupported srq_wr=0x%x"
2415                         " requested (max_srq_wqe=0x%x)\n",
2416                         attrs->attr.max_wr, qattr->max_srq_wqe);
2417                 return -EINVAL;
2418         }
2419
2420         if (attrs->attr.max_sge > qattr->max_sge) {
2421                 QL_DPRINT12(ha,
2422                         "unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
2423                         attrs->attr.max_sge, qattr->max_sge);
2424                 return -EINVAL;
2425         }
2426
2427         if (attrs->attr.srq_limit > attrs->attr.max_wr) {
2428                 QL_DPRINT12(ha,
2429                        "unsupported srq_limit=0x%x requested"
2430                         " (max_srq_limit=0x%x)\n",
2431                         attrs->attr.srq_limit, attrs->attr.max_wr);
2432                 return -EINVAL;
2433         }
2434
2435         QL_DPRINT12(ha, "exit\n");
2436         return 0;
2437 }
2438
2439 static void
2440 qlnxr_free_srq_user_params(struct qlnxr_srq *srq)
2441 {
2442         struct qlnxr_dev        *dev = srq->dev;
2443         qlnx_host_t             *ha;
2444
2445         ha = dev->ha;
2446
2447         QL_DPRINT12(ha, "enter\n");
2448
2449         qlnxr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
2450         ib_umem_release(srq->usrq.umem);
2451         ib_umem_release(srq->prod_umem);
2452
2453         QL_DPRINT12(ha, "exit\n");
2454         return;
2455 }
2456
2457 static void
2458 qlnxr_free_srq_kernel_params(struct qlnxr_srq *srq)
2459 {
2460         struct qlnxr_srq_hwq_info *hw_srq  = &srq->hw_srq;
2461         struct qlnxr_dev        *dev = srq->dev;
2462         qlnx_host_t             *ha;
2463
2464         ha = dev->ha;
2465
2466         QL_DPRINT12(ha, "enter\n");
2467
2468         ecore_chain_free(dev->cdev, &hw_srq->pbl);
2469
2470         qlnx_dma_free_coherent(&dev->cdev,
2471                 hw_srq->virt_prod_pair_addr,
2472                 hw_srq->phy_prod_pair_addr,
2473                 sizeof(struct rdma_srq_producers));
2474
2475         QL_DPRINT12(ha, "exit\n");
2476
2477         return;
2478 }
2479
2480 static int
2481 qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
2482         struct qlnxr_srq *srq,
2483         struct qlnxr_create_srq_ureq *ureq,
2484         int access, int dmasync)
2485 {
2486 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
2487         struct ib_umem_chunk    *chunk;
2488 #endif
2489         struct scatterlist      *sg;
2490         int                     rc;
2491         struct qlnxr_dev        *dev = srq->dev;
2492         qlnx_host_t             *ha;
2493
2494         ha = dev->ha;
2495
2496         QL_DPRINT12(ha, "enter\n");
2497
2498         rc = qlnxr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
2499                                   ureq->srq_len, access, dmasync, 1);
2500         if (rc)
2501                 return rc;
2502
2503         srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
2504                                      sizeof(struct rdma_srq_producers),
2505                                      access, dmasync);
2506         if (IS_ERR(srq->prod_umem)) {
2507                 qlnxr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
2508                 ib_umem_release(srq->usrq.umem);
2509
2510                 QL_DPRINT12(ha, "ib_umem_get failed for producer [%p]\n",
2511                         PTR_ERR(srq->prod_umem));
2512
2513                 return PTR_ERR(srq->prod_umem);
2514         }
2515
2516 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
2517         chunk = container_of((&srq->prod_umem->chunk_list)->next,
2518                              typeof(*chunk), list);
2519         sg = &chunk->page_list[0];
2520 #else
2521         sg = srq->prod_umem->sg_head.sgl;
2522 #endif
2523         srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
2524
2525         QL_DPRINT12(ha, "exit\n");
2526         return 0;
2527 }
2528
2529 static int
2530 qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
2531         struct qlnxr_dev *dev,
2532         struct ib_srq_init_attr *init_attr)
2533 {
2534         struct qlnxr_srq_hwq_info       *hw_srq  = &srq->hw_srq;
2535         dma_addr_t                      phy_prod_pair_addr;
2536         u32                             num_elems, max_wr;
2537         void                            *va;
2538         int                             rc;
2539         qlnx_host_t                     *ha;
2540
2541         ha = dev->ha;
2542
2543         QL_DPRINT12(ha, "enter\n");
2544
2545         va = qlnx_dma_alloc_coherent(&dev->cdev,
2546                         &phy_prod_pair_addr,
2547                         sizeof(struct rdma_srq_producers));
2548         if (!va) {
2549                 QL_DPRINT11(ha, "qlnx_dma_alloc_coherent failed for producer\n");
2550                 return -ENOMEM;
2551         }
2552
2553         hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
2554         hw_srq->virt_prod_pair_addr = va;
2555
2556         max_wr = init_attr->attr.max_wr;
2557
2558         num_elems = max_wr * RDMA_MAX_SRQ_WQE_SIZE;
2559
2560         rc = ecore_chain_alloc(dev->cdev,
2561                    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2562                    ECORE_CHAIN_MODE_PBL,
2563                    ECORE_CHAIN_CNT_TYPE_U32,
2564                    num_elems,
2565                    ECORE_RDMA_SRQ_WQE_ELEM_SIZE,
2566                    &hw_srq->pbl, NULL);
2567
2568         if (rc) {
2569                 QL_DPRINT11(ha, "ecore_chain_alloc failed [%d]\n", rc);
2570                 goto err0;
2571         }
2572
2573         hw_srq->max_wr = max_wr;
2574         hw_srq->num_elems = num_elems;
2575         hw_srq->max_sges = RDMA_MAX_SGE_PER_SRQ;
2576
2577         QL_DPRINT12(ha, "exit\n");
2578         return 0;
2579
2580 err0:
2581         qlnx_dma_free_coherent(&dev->cdev, va, phy_prod_pair_addr,
2582                 sizeof(struct rdma_srq_producers));
2583
2584         QL_DPRINT12(ha, "exit [%d]\n", rc);
2585         return rc;
2586 }
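/*
 * Note (explanatory, not in the original source): the SRQ chain is sized as
 * max_wr * RDMA_MAX_SRQ_WQE_SIZE elements, i.e. it is provisioned for the
 * worst-case WQE footprint per work request rather than for the SGE count
 * the caller actually requested.
 */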
2587
2588 static inline void
2589 qlnxr_init_common_qp_in_params(struct qlnxr_dev *dev,
2590         struct qlnxr_pd *pd,
2591         struct qlnxr_qp *qp,
2592         struct ib_qp_init_attr *attrs,
2593         bool fmr_and_reserved_lkey,
2594         struct ecore_rdma_create_qp_in_params *params)
2595 {
2596         qlnx_host_t     *ha;
2597
2598         ha = dev->ha;
2599
2600         QL_DPRINT12(ha, "enter\n");
2601
2602         /* QP handle to be written in an async event */
2603         params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
2604         params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
2605
2606         params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
2607         params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
2608         params->pd = pd->pd_id;
2609         params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
2610         params->sq_cq_id = get_qlnxr_cq(attrs->send_cq)->icid;
2611         params->stats_queue = 0;
2612
2613         params->rq_cq_id = get_qlnxr_cq(attrs->recv_cq)->icid;
2614
2615         if (qp->srq) {
2616                 /* QP is associated with SRQ instead of RQ */
2617                 params->srq_id = qp->srq->srq_id;
2618                 params->use_srq = true;
2619                 QL_DPRINT11(ha, "exit srq_id = 0x%x use_srq = 0x%x\n",
2620                         params->srq_id, params->use_srq);
2621                 return;
2622         }
2623
2624         params->srq_id = 0;
2625         params->use_srq = false;
2626
2627         QL_DPRINT12(ha, "exit\n");
2628         return;
2629 }
2630
2631 static inline void
2632 qlnxr_qp_user_print( struct qlnxr_dev *dev,
2633         struct qlnxr_qp *qp)
2634 {
2635         QL_DPRINT12((dev->ha), "qp=%p. sq_addr=0x%llx, sq_len=%zd, "
2636                 "rq_addr=0x%llx, rq_len=%zd\n",
2637                 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
2638                 qp->urq.buf_len);
2639         return;
2640 }
2641
2642 static int
2643 qlnxr_idr_add(struct qlnxr_dev *dev, void *ptr, u32 id)
2644 {
2645         u32             newid;
2646         int             rc;
2647         qlnx_host_t     *ha;
2648
2649         ha = dev->ha;
2650
2651         QL_DPRINT12(ha, "enter\n");
2652
2653         if (!QLNX_IS_IWARP(dev))
2654                 return 0;
2655
2656         do {
2657                 if (!idr_pre_get(&dev->qpidr, GFP_KERNEL)) {
2658                         QL_DPRINT11(ha, "idr_pre_get failed\n");
2659                         return -ENOMEM;
2660                 }
2661
2662                 mtx_lock(&dev->idr_lock);
2663
2664                 rc = idr_get_new_above(&dev->qpidr, ptr, id, &newid);
2665
2666                 mtx_unlock(&dev->idr_lock);
2667
2668         } while (rc == -EAGAIN);
2669
2670         QL_DPRINT12(ha, "exit [%d]\n", rc);
2671
2672         return rc;
2673 }
2674
2675 static void
2676 qlnxr_idr_remove(struct qlnxr_dev *dev, u32 id)
2677 {
2678         qlnx_host_t     *ha;
2679
2680         ha = dev->ha;
2681
2682         QL_DPRINT12(ha, "enter\n");
2683
2684         if (!QLNX_IS_IWARP(dev))
2685                 return;
2686
2687         mtx_lock(&dev->idr_lock);
2688         idr_remove(&dev->qpidr, id);
2689         mtx_unlock(&dev->idr_lock);
2690
2691         QL_DPRINT12(ha, "exit \n");
2692
2693         return;
2694 }
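/*
 * Note (assumption, not in the original source): the qpidr table is only
 * maintained for iWARP, where it presumably allows event and CM handlers to
 * look a QP up by its qp_id; the RoCE path skips the idr entirely.
 */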
2695
2696 static inline void
2697 qlnxr_iwarp_populate_user_qp(struct qlnxr_dev *dev,
2698         struct qlnxr_qp *qp,
2699         struct ecore_rdma_create_qp_out_params *out_params)
2700 {
2701         qlnx_host_t     *ha;
2702
2703         ha = dev->ha;
2704
2705         QL_DPRINT12(ha, "enter\n");
2706
2707         qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
2708         qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
2709
2710         qlnxr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
2711                            &qp->usq.pbl_info);
2712
2713         if (qp->srq) {
2714                 QL_DPRINT11(ha, "qp->srq = %p\n", qp->srq);
2715                 return;
2716         }
2717
2718         qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
2719         qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
2720
2721         qlnxr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
2722                            &qp->urq.pbl_info);
2723
2724         QL_DPRINT12(ha, "exit\n");
2725         return;
2726 }
2727
2728 static int
2729 qlnxr_create_user_qp(struct qlnxr_dev *dev,
2730         struct qlnxr_qp *qp,
2731         struct ib_pd *ibpd,
2732         struct ib_udata *udata,
2733         struct ib_qp_init_attr *attrs)
2734 {
2735         struct ecore_rdma_destroy_qp_out_params d_out_params;
2736         struct ecore_rdma_create_qp_in_params in_params;
2737         struct ecore_rdma_create_qp_out_params out_params;
2738         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
2739         struct ib_ucontext *ib_ctx = NULL;
2740         struct qlnxr_ucontext *ctx = NULL;
2741         struct qlnxr_create_qp_ureq ureq;
2742         int alloc_and_init = QLNX_IS_ROCE(dev);
2743         int rc = -EINVAL;
2744         qlnx_host_t     *ha;
2745
2746         ha = dev->ha;
2747
2748         QL_DPRINT12(ha, "enter\n");
2749
2750         ib_ctx = ibpd->uobject->context;
2751         ctx = get_qlnxr_ucontext(ib_ctx);
2752
2753         memset(&ureq, 0, sizeof(ureq));
2754         rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
2755
2756         if (rc) {
2757                 QL_DPRINT11(ha, "ib_copy_from_udata failed [%d]\n", rc);
2758                 return rc;
2759         }
2760
2761         /* SQ - read access only (0), dma sync not required (0) */
2762         rc = qlnxr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
2763                                   ureq.sq_len, 0, 0,
2764                                   alloc_and_init);
2765         if (rc) {
2766                 QL_DPRINT11(ha, "qlnxr_init_user_queue failed [%d]\n", rc);
2767                 return rc;
2768         }
2769
2770         if (!qp->srq) {
2771                 /* RQ - read access only (0), dma sync not required (0) */
2772                 rc = qlnxr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
2773                                           ureq.rq_len, 0, 0,
2774                                           alloc_and_init);
2775
2776                 if (rc) {
2777                         QL_DPRINT11(ha, "qlnxr_init_user_queue failed [%d]\n", rc);
2778                         return rc;
2779                 }
2780         }
2781
2782         memset(&in_params, 0, sizeof(in_params));
2783         qlnxr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
2784         in_params.qp_handle_lo = ureq.qp_handle_lo;
2785         in_params.qp_handle_hi = ureq.qp_handle_hi;
2786         in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
2787         in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
2788
2789         if (!qp->srq) {
2790                 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
2791                 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
2792         }
2793
2794         qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, &in_params, &out_params);
2795
2796         if (!qp->ecore_qp) {
2797                 rc = -ENOMEM;
2798                 QL_DPRINT11(ha, "ecore_rdma_create_qp failed\n");
2799                 goto err1;
2800         }
2801
2802         if (QLNX_IS_IWARP(dev))
2803                 qlnxr_iwarp_populate_user_qp(dev, qp, &out_params);
2804
2805         qp->qp_id = out_params.qp_id;
2806         qp->icid = out_params.icid;
2807
2808         rc = qlnxr_copy_qp_uresp(dev, qp, udata);
2809
2810         if (rc) {
2811                 QL_DPRINT11(ha, "qlnxr_copy_qp_uresp failed\n");
2812                 goto err;
2813         }
2814
2815         qlnxr_qp_user_print(dev, qp);
2816
2817         QL_DPRINT12(ha, "exit\n");
2818         return 0;
2819 err:
2820         rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp, &d_out_params);
2821
2822         if (rc)
2823                 QL_DPRINT12(ha, "fatal fault\n");
2824
2825 err1:
2826         qlnxr_cleanup_user(dev, qp);
2827
2828         QL_DPRINT12(ha, "exit[%d]\n", rc);
2829         return rc;
2830 }
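/*
 * Note (explanatory, not in the original source): the user-QP flow is
 * ureq-driven: the user-space provider passes the SQ/RQ buffer addresses
 * and lengths in qlnxr_create_qp_ureq, the driver pins them and builds the
 * PBLs, ecore_rdma_create_qp() instantiates the QP, and the resulting
 * icid and doorbell offsets are returned through qlnxr_copy_qp_uresp().
 */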
2831
2832 static void
2833 qlnxr_set_roce_db_info(struct qlnxr_dev *dev,
2834         struct qlnxr_qp *qp)
2835 {
2836         qlnx_host_t     *ha;
2837
2838         ha = dev->ha;
2839
2840         QL_DPRINT12(ha, "enter qp = %p qp->srq %p\n", qp, qp->srq);
2841
2842         qp->sq.db = dev->db_addr +
2843                 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2844         qp->sq.db_data.data.icid = qp->icid + 1;
2845
2846         if (!qp->srq) {
2847                 qp->rq.db = dev->db_addr +
2848                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
2849                 qp->rq.db_data.data.icid = qp->icid;
2850         }
2851
2852         QL_DPRINT12(ha, "exit\n");
2853         return;
2854 }
2855
2856 static void
2857 qlnxr_set_iwarp_db_info(struct qlnxr_dev *dev,
2858         struct qlnxr_qp *qp)
2859
2860 {
2861         qlnx_host_t     *ha;
2862
2863         ha = dev->ha;
2864
2865         QL_DPRINT12(ha, "enter qp = %p qp->srq %p\n", qp, qp->srq);
2866
2867         qp->sq.db = dev->db_addr +
2868                 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2869         qp->sq.db_data.data.icid = qp->icid;
2870
2871         if (!qp->srq) {
2872                 qp->rq.db = dev->db_addr +
2873                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2874                 qp->rq.db_data.data.icid = qp->icid;
2875
2876                 qp->rq.iwarp_db2 = dev->db_addr +
2877                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2878                 qp->rq.iwarp_db2_data.data.icid = qp->icid;
2879                 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
2880         }
2881
2882         QL_DPRINT12(ha,
2883                 "qp->sq.db = %p qp->sq.db_data.data.icid =0x%x\n"
2884                 "\t\t\tqp->rq.db = %p qp->rq.db_data.data.icid =0x%x\n"
2885                 "\t\t\tqp->rq.iwarp_db2 = %p qp->rq.iwarp_db2.data.icid =0x%x"
2886                 " qp->rq.iwarp_db2.data.prod_val =0x%x\n",
2887                 qp->sq.db, qp->sq.db_data.data.icid,
2888                 qp->rq.db, qp->rq.db_data.data.icid,
2889                 qp->rq.iwarp_db2, qp->rq.iwarp_db2_data.data.icid,
2890                 qp->rq.iwarp_db2_data.data.value);
2891
2892         QL_DPRINT12(ha, "exit\n");
2893         return;
2894 }
2895
2896 static int
2897 qlnxr_roce_create_kernel_qp(struct qlnxr_dev *dev,
2898         struct qlnxr_qp *qp,
2899         struct ecore_rdma_create_qp_in_params *in_params,
2900         u32 n_sq_elems,
2901         u32 n_rq_elems)
2902 {
2903         struct ecore_rdma_create_qp_out_params out_params;
2904         int             rc;
2905         qlnx_host_t     *ha;
2906
2907         ha = dev->ha;
2908
2909         QL_DPRINT12(ha, "enter\n");
2910
2911         rc = ecore_chain_alloc(
2912                 dev->cdev,
2913                 ECORE_CHAIN_USE_TO_PRODUCE,
2914                 ECORE_CHAIN_MODE_PBL,
2915                 ECORE_CHAIN_CNT_TYPE_U32,
2916                 n_sq_elems,
2917                 QLNXR_SQE_ELEMENT_SIZE,
2918                 &qp->sq.pbl,
2919                 NULL);
2920
2921         if (rc) {
2922                 QL_DPRINT11(ha, "ecore_chain_alloc qp->sq.pbl failed[%d]\n", rc);
2923                 return rc;
2924         }
2925
2926         in_params->sq_num_pages = ecore_chain_get_page_cnt(&qp->sq.pbl);
2927         in_params->sq_pbl_ptr = ecore_chain_get_pbl_phys(&qp->sq.pbl);
2928
2929         if (!qp->srq) {
2930                 rc = ecore_chain_alloc(
2931                         dev->cdev,
2932                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2933                         ECORE_CHAIN_MODE_PBL,
2934                         ECORE_CHAIN_CNT_TYPE_U32,
2935                         n_rq_elems,
2936                         QLNXR_RQE_ELEMENT_SIZE,
2937                         &qp->rq.pbl,
2938                         NULL);
2939
2940                 if (rc) {
2941                         QL_DPRINT11(ha,
2942                                 "ecore_chain_alloc qp->rq.pbl failed[%d]\n", rc);
2943                         return rc;
2944                 }
2945
2946                 in_params->rq_num_pages = ecore_chain_get_page_cnt(&qp->rq.pbl);
2947                 in_params->rq_pbl_ptr = ecore_chain_get_pbl_phys(&qp->rq.pbl);
2948         }
2949
2950         qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, in_params, &out_params);
2951
2952         if (!qp->ecore_qp) {
2953                 QL_DPRINT11(ha, "qp->ecore_qp == NULL\n");
2954                 return -EINVAL;
2955         }
2956
2957         qp->qp_id = out_params.qp_id;
2958         qp->icid = out_params.icid;
2959
2960         qlnxr_set_roce_db_info(dev, qp);
2961
2962         QL_DPRINT12(ha, "exit\n");
2963         return 0;
2964 }
2965
2966 static int
2967 qlnxr_iwarp_create_kernel_qp(struct qlnxr_dev *dev,
2968         struct qlnxr_qp *qp,
2969         struct ecore_rdma_create_qp_in_params *in_params,
2970         u32 n_sq_elems,
2971         u32 n_rq_elems)
2972 {
2973         struct ecore_rdma_destroy_qp_out_params d_out_params;
2974         struct ecore_rdma_create_qp_out_params out_params;
2975         struct ecore_chain_ext_pbl ext_pbl;
2976         int rc;
2977         qlnx_host_t     *ha;
2978
2979         ha = dev->ha;
2980
2981         QL_DPRINT12(ha, "enter\n");
2982
2983         in_params->sq_num_pages = ECORE_CHAIN_PAGE_CNT(n_sq_elems,
2984                                                      QLNXR_SQE_ELEMENT_SIZE,
2985                                                      ECORE_CHAIN_MODE_PBL);
2986         in_params->rq_num_pages = ECORE_CHAIN_PAGE_CNT(n_rq_elems,
2987                                                      QLNXR_RQE_ELEMENT_SIZE,
2988                                                      ECORE_CHAIN_MODE_PBL);
2989
2990         QL_DPRINT12(ha, "n_sq_elems = 0x%x"
2991                 " n_rq_elems = 0x%x in_params\n"
2992                 "\t\t\tqp_handle_lo\t\t= 0x%08x\n"
2993                 "\t\t\tqp_handle_hi\t\t= 0x%08x\n"
2994                 "\t\t\tqp_handle_async_lo\t\t= 0x%08x\n"
2995                 "\t\t\tqp_handle_async_hi\t\t= 0x%08x\n"
2996                 "\t\t\tuse_srq\t\t\t= 0x%x\n"
2997                 "\t\t\tsignal_all\t\t= 0x%x\n"
2998                 "\t\t\tfmr_and_reserved_lkey\t= 0x%x\n"
2999                 "\t\t\tpd\t\t\t= 0x%x\n"
3000                 "\t\t\tdpi\t\t\t= 0x%x\n"
3001                 "\t\t\tsq_cq_id\t\t\t= 0x%x\n"
3002                 "\t\t\tsq_num_pages\t\t= 0x%x\n"
3003                 "\t\t\tsq_pbl_ptr\t\t= %p\n"
3004                 "\t\t\tmax_sq_sges\t\t= 0x%x\n"
3005                 "\t\t\trq_cq_id\t\t\t= 0x%x\n"
3006                 "\t\t\trq_num_pages\t\t= 0x%x\n"
3007                 "\t\t\trq_pbl_ptr\t\t= %p\n"
3008                 "\t\t\tsrq_id\t\t\t= 0x%x\n"
3009                 "\t\t\tstats_queue\t\t= 0x%x\n",
3010                 n_sq_elems, n_rq_elems,
3011                 in_params->qp_handle_lo,
3012                 in_params->qp_handle_hi,
3013                 in_params->qp_handle_async_lo,
3014                 in_params->qp_handle_async_hi,
3015                 in_params->use_srq,
3016                 in_params->signal_all,
3017                 in_params->fmr_and_reserved_lkey,
3018                 in_params->pd,
3019                 in_params->dpi,
3020                 in_params->sq_cq_id,
3021                 in_params->sq_num_pages,
3022                 (void *)in_params->sq_pbl_ptr,
3023                 in_params->max_sq_sges,
3024                 in_params->rq_cq_id,
3025                 in_params->rq_num_pages,
3026                 (void *)in_params->rq_pbl_ptr,
3027                 in_params->srq_id,
3028                 in_params->stats_queue );
3029
3030         memset(&out_params, 0, sizeof (struct ecore_rdma_create_qp_out_params));
3031         memset(&ext_pbl, 0, sizeof (struct ecore_chain_ext_pbl));
3032
3033         qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, in_params, &out_params);
3034
3035         if (!qp->ecore_qp) {
3036                 QL_DPRINT11(ha, "ecore_rdma_create_qp failed\n");
3037                 return -EINVAL;
3038         }
3039
3040         /* Now we allocate the chain */
3041         ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
3042         ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
3043
3044         QL_DPRINT12(ha, "ext_pbl.p_pbl_virt = %p "
3045                 "ext_pbl.p_pbl_phys = %p\n",
3046                 ext_pbl.p_pbl_virt, ext_pbl.p_pbl_phys);
3047                 
3048         rc = ecore_chain_alloc(
3049                 dev->cdev,
3050                 ECORE_CHAIN_USE_TO_PRODUCE,
3051                 ECORE_CHAIN_MODE_PBL,
3052                 ECORE_CHAIN_CNT_TYPE_U32,
3053                 n_sq_elems,
3054                 QLNXR_SQE_ELEMENT_SIZE,
3055                 &qp->sq.pbl,
3056                 &ext_pbl);
3057
3058         if (rc) {
3059                 QL_DPRINT11(ha,
3060                         "ecore_chain_alloc qp->sq.pbl failed rc = %d\n", rc);
3061                 goto err;
3062         }
3063
3064         ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
3065         ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
3066
3067         QL_DPRINT12(ha, "ext_pbl.p_pbl_virt = %p "
3068                 "ext_pbl.p_pbl_phys = %p\n",
3069                 ext_pbl.p_pbl_virt, ext_pbl.p_pbl_phys);
3070
3071         if (!qp->srq) {
3072                 rc = ecore_chain_alloc(
3073                         dev->cdev,
3074                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
3075                         ECORE_CHAIN_MODE_PBL,
3076                         ECORE_CHAIN_CNT_TYPE_U32,
3077                         n_rq_elems,
3078                         QLNXR_RQE_ELEMENT_SIZE,
3079                         &qp->rq.pbl,
3080                         &ext_pbl);
3081
3082                 if (rc) {
3083                         QL_DPRINT11(ha, "ecore_chain_alloc qp->rq.pbl"
3084                                 " failed rc = %d\n", rc);
3085                         goto err;
3086                 }
3087         }
3088
3089         QL_DPRINT12(ha, "qp_id = 0x%x icid =0x%x\n",
3090                 out_params.qp_id, out_params.icid);
3091
3092         qp->qp_id = out_params.qp_id;
3093         qp->icid = out_params.icid;
3094
3095         qlnxr_set_iwarp_db_info(dev, qp);
3096
3097         QL_DPRINT12(ha, "exit\n");
3098         return 0;
3099
3100 err:
3101         ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp, &d_out_params);
3102
3103         QL_DPRINT12(ha, "exit rc = %d\n", rc);
3104         return rc;
3105 }
3106
3107 static int
3108 qlnxr_create_kernel_qp(struct qlnxr_dev *dev,
3109         struct qlnxr_qp *qp,
3110         struct ib_pd *ibpd,
3111         struct ib_qp_init_attr *attrs)
3112 {
3113         struct ecore_rdma_create_qp_in_params in_params;
3114         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
3115         int rc = -EINVAL;
3116         u32 n_rq_elems;
3117         u32 n_sq_elems;
3118         u32 n_sq_entries;
3119         struct ecore_rdma_device *qattr = ecore_rdma_query_device(dev->rdma_ctx);
3120         qlnx_host_t     *ha;
3121
3122         ha = dev->ha;
3123
3124         QL_DPRINT12(ha, "enter\n");
3125
3126         memset(&in_params, 0, sizeof(in_params));
3127
3128         /* A single work request may take up to MAX_SQ_WQE_SIZE elements in
3129          * the ring. The ring should allow at least a single WR, even if the
3130          * user requested none, due to allocation issues.
3131          * We should add an extra WR since the prod and cons indices of
3132          * wqe_wr_id are managed in such a way that the WQ is considered full
3133          * when (prod+1)%max_wr==cons. We currently don't do that because we
3134          * double the number of entries due to an iSER issue that pushes far more
3135          * WRs than indicated. If we decline its ib_post_send() then we get
3136          * error prints in the dmesg we'd like to avoid.
3137          */
3138         qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
3139                               qattr->max_wqe);
3140
3141         qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
3142                         GFP_KERNEL);
3143         if (!qp->wqe_wr_id) {
3144                 QL_DPRINT11(ha, "failed SQ shadow memory allocation\n");
3145                 return -ENOMEM;
3146         }
3147
3148         /* QP handle to be written in CQE */
3149         in_params.qp_handle_lo = lower_32_bits((uintptr_t)qp);
3150         in_params.qp_handle_hi = upper_32_bits((uintptr_t)qp);
3151
3152         /* A single work request may take up to MAX_RQ_WQE_SIZE elements in
3153          * the ring. The ring should allow at least a single WR, even if the
3154          * user requested none, due to allocation issues.
3155          */
3156         qp->rq.max_wr = (u16)max_t(u32, attrs->cap.max_recv_wr, 1);
3157
3158         /* Allocate driver internal RQ array */
3159         if (!qp->srq) {
3160                 qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
3161                                         GFP_KERNEL);
3162                 if (!qp->rqe_wr_id) {
3163                         QL_DPRINT11(ha, "failed RQ shadow memory allocation\n");
3164                         kfree(qp->wqe_wr_id);
3165                         return -ENOMEM;
3166                 }
3167         }
3168
3169         //qlnxr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
3170
3171         in_params.qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
3172         in_params.qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
3173
3174         in_params.signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
3175         in_params.fmr_and_reserved_lkey = true;
3176         in_params.pd = pd->pd_id;
3177         in_params.dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
3178         in_params.sq_cq_id = get_qlnxr_cq(attrs->send_cq)->icid;
3179         in_params.stats_queue = 0;
3180
3181         in_params.rq_cq_id = get_qlnxr_cq(attrs->recv_cq)->icid;
3182
3183         if (qp->srq) {
3184                 /* QP is associated with SRQ instead of RQ */
3185                 in_params.srq_id = qp->srq->srq_id;
3186                 in_params.use_srq = true;
3187                 QL_DPRINT11(ha, "exit srq_id = 0x%x use_srq = 0x%x\n",
3188                         in_params.srq_id, in_params.use_srq);
3189         } else {
3190                 in_params.srq_id = 0;
3191                 in_params.use_srq = false;
3192         }
3193
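             /*
              * Size the rings in hardware elements: clamp the requested SQ
              * depth to the device limit, then multiply by the maximum number
              * of chain elements a single WR may consume.
              */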
3194         n_sq_entries = attrs->cap.max_send_wr;
3195         n_sq_entries = min_t(u32, n_sq_entries, qattr->max_wqe);
3196         n_sq_entries = max_t(u32, n_sq_entries, 1);
3197         n_sq_elems = n_sq_entries * QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
3198
3199         n_rq_elems = qp->rq.max_wr * QLNXR_MAX_RQE_ELEMENTS_PER_RQE;
3200
3201         if (QLNX_IS_ROCE(dev)) {
3202                 rc = qlnxr_roce_create_kernel_qp(dev, qp, &in_params,
3203                                                 n_sq_elems, n_rq_elems);
3204         } else {
3205                 rc = qlnxr_iwarp_create_kernel_qp(dev, qp, &in_params,
3206                                                  n_sq_elems, n_rq_elems);
3207         }
3208
3209         if (rc)
3210                 qlnxr_cleanup_kernel(dev, qp);
3211
3212         QL_DPRINT12(ha, "exit [%d]\n", rc);
3213         return rc;
3214 }
3215
3216 struct ib_qp *
3217 qlnxr_create_qp(struct ib_pd *ibpd,
3218                 struct ib_qp_init_attr *attrs,
3219                 struct ib_udata *udata)
3220 {
3221         struct qlnxr_dev *dev = get_qlnxr_dev(ibpd->device);
3222         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
3223         struct qlnxr_qp *qp;
3224         int rc = 0;
3225         qlnx_host_t     *ha;
3226
3227         ha = dev->ha;
3228
3229         QL_DPRINT12(ha, "enter\n");
3230
3231         rc = qlnxr_check_qp_attrs(ibpd, dev, attrs, udata);
3232         if (rc) {
3233                 QL_DPRINT11(ha, "qlnxr_check_qp_attrs failed [%d]\n", rc);
3234                 return ERR_PTR(rc);
3235         }
3236
3237         QL_DPRINT12(ha, "called from %s, event_handler=%p,"
3238                 " pd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
3239                 (udata ? "user library" : "kernel"),
3240                 attrs->event_handler, pd,
3241                 get_qlnxr_cq(attrs->send_cq),
3242                 get_qlnxr_cq(attrs->send_cq)->icid,
3243                 get_qlnxr_cq(attrs->recv_cq),
3244                 get_qlnxr_cq(attrs->recv_cq)->icid);
3245
3246         qp = qlnx_zalloc(sizeof(struct qlnxr_qp));
3247
3248         if (!qp) {
3249                 QL_DPRINT11(ha, "kzalloc(qp) failed\n");
3250                 return ERR_PTR(-ENOMEM);
3251         }
3252
3253         qlnxr_set_common_qp_params(dev, qp, pd, attrs);
3254
3255         if (attrs->qp_type == IB_QPT_GSI) {
3256                 QL_DPRINT11(ha, "calling qlnxr_create_gsi_qp\n");
3257                 return qlnxr_create_gsi_qp(dev, attrs, qp);
3258         }
3259
3260         if (udata) {
3261                 rc = qlnxr_create_user_qp(dev, qp, ibpd, udata, attrs);
3262
3263                 if (rc) {
3264                         QL_DPRINT11(ha, "qlnxr_create_user_qp failed\n");
3265                         goto err;
3266                 }
3267         } else {
3268                 rc = qlnxr_create_kernel_qp(dev, qp, ibpd, attrs);
3269
3270                 if (rc) {
3271                         QL_DPRINT11(ha, "qlnxr_create_kernel_qp failed\n");
3272                         goto err;
3273                 }
3274         }
3275
3276         qp->ibqp.qp_num = qp->qp_id;
3277
3278         rc = qlnxr_idr_add(dev, qp, qp->qp_id);
3279
3280         if (rc) {
3281                 QL_DPRINT11(ha, "qlnxr_idr_add failed\n");
3282                 goto err;
3283         }
3284
3285         QL_DPRINT12(ha, "exit [%p]\n", &qp->ibqp);
3286
3287         return &qp->ibqp;
3288 err:
3289         kfree(qp);
3290
3291         QL_DPRINT12(ha, "failed exit\n");
3292         return ERR_PTR(-EFAULT);
3293 }
3294
3295 static enum ib_qp_state
3296 qlnxr_get_ibqp_state(enum ecore_roce_qp_state qp_state)
3297 {
3298         enum ib_qp_state state = IB_QPS_ERR;
3299
3300         switch (qp_state) {
3301         case ECORE_ROCE_QP_STATE_RESET:
3302                 state = IB_QPS_RESET;
3303                 break;
3304
3305         case ECORE_ROCE_QP_STATE_INIT:
3306                 state = IB_QPS_INIT;
3307                 break;
3308
3309         case ECORE_ROCE_QP_STATE_RTR:
3310                 state = IB_QPS_RTR;
3311                 break;
3312
3313         case ECORE_ROCE_QP_STATE_RTS:
3314                 state = IB_QPS_RTS;
3315                 break;
3316
3317         case ECORE_ROCE_QP_STATE_SQD:
3318                 state = IB_QPS_SQD;
3319                 break;
3320
3321         case ECORE_ROCE_QP_STATE_ERR:
3322                 state = IB_QPS_ERR;
3323                 break;
3324
3325         case ECORE_ROCE_QP_STATE_SQE:
3326                 state = IB_QPS_SQE;
3327                 break;
3328         }
3329         return state;
3330 }
3331
3332 static enum ecore_roce_qp_state
3333 qlnxr_get_state_from_ibqp( enum ib_qp_state qp_state)
3334 {
3335         enum ecore_roce_qp_state ecore_qp_state;
3336
3337         ecore_qp_state = ECORE_ROCE_QP_STATE_ERR;
3338
3339         switch (qp_state) {
3340         case IB_QPS_RESET:
3341                 ecore_qp_state =  ECORE_ROCE_QP_STATE_RESET;
3342                 break;
3343
3344         case IB_QPS_INIT:
3345                 ecore_qp_state =  ECORE_ROCE_QP_STATE_INIT;
3346                 break;
3347
3348         case IB_QPS_RTR:
3349                 ecore_qp_state =  ECORE_ROCE_QP_STATE_RTR;
3350                 break;
3351
3352         case IB_QPS_RTS:
3353                 ecore_qp_state =  ECORE_ROCE_QP_STATE_RTS;
3354                 break;
3355
3356         case IB_QPS_SQD:
3357                 ecore_qp_state =  ECORE_ROCE_QP_STATE_SQD;
3358                 break;
3359
3360         case IB_QPS_ERR:
3361                 ecore_qp_state =  ECORE_ROCE_QP_STATE_ERR;
3362                 break;
3363
3364         default:
3365                 ecore_qp_state =  ECORE_ROCE_QP_STATE_ERR;
3366                 break;
3367         }
3368
3369         return (ecore_qp_state);
3370 }
3371
3372 static void
3373 qlnxr_reset_qp_hwq_info(struct qlnxr_qp_hwq_info *qph)
3374 {
3375         ecore_chain_reset(&qph->pbl);
3376         qph->prod = qph->cons = 0;
3377         qph->wqe_cons = 0;
3378         qph->db_data.data.value = cpu_to_le16(0);
3379
3380         return;
3381 }
3382
3383 static int
3384 qlnxr_update_qp_state(struct qlnxr_dev *dev,
3385         struct qlnxr_qp *qp,
3386         enum ecore_roce_qp_state new_state)
3387 {
3388         int             status = 0;
3389         uint32_t        reg_addr;
3390         struct ecore_dev *cdev;
3391         qlnx_host_t     *ha;
3392
3393         ha = dev->ha;
3394         cdev = &ha->cdev;
3395
3396         QL_DPRINT12(ha, "enter qp = %p new_state = 0x%x qp->state = 0x%x\n",
3397                 qp, new_state, qp->state);
3398
3399         if (new_state == qp->state) {
3400                 return 0;
3401         }
3402
3403         switch (qp->state) {
3404         case ECORE_ROCE_QP_STATE_RESET:
3405                 switch (new_state) {
3406                 case ECORE_ROCE_QP_STATE_INIT:
3407                         qp->prev_wqe_size = 0;
3408                         qlnxr_reset_qp_hwq_info(&qp->sq);
3409                         if (!(qp->srq))
3410                                 qlnxr_reset_qp_hwq_info(&qp->rq);
3411                         break;
3412                 default:
3413                         status = -EINVAL;
3414                         break;
3415                 };
3416                 break;
3417         case ECORE_ROCE_QP_STATE_INIT:
3418                 /* INIT->XXX */
3419                 switch (new_state) {
3420                 case ECORE_ROCE_QP_STATE_RTR:
3421                 /* Update doorbell (in case post_recv was done before move to RTR) */
3422                         if (qp->srq)
3423                                 break;
3424                         wmb();
3425                         //writel(qp->rq.db_data.raw, qp->rq.db);
3426                         //if (QLNX_IS_IWARP(dev))
3427                         //      writel(qp->rq.iwarp_db2_data.raw,
3428                         //             qp->rq.iwarp_db2);
3429
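                             /*
                              * FreeBSD equivalent of the commented-out writel()
                              * calls above: reg_addr is the byte offset of this
                              * QP's RQ doorbell within the doorbell BAR, rung
                              * through the bus-space handle with a barrier to
                              * order the write.
                              */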
3430                         reg_addr = (uint32_t)((uint8_t *)qp->rq.db -
3431                                         (uint8_t *)cdev->doorbells);
3432
3433                         bus_write_4(ha->pci_dbells, reg_addr, qp->rq.db_data.raw);
3434                         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
3435
3436                         if (QLNX_IS_IWARP(dev)) {
3437                                 reg_addr = (uint32_t)((uint8_t *)qp->rq.iwarp_db2 -
3438                                         (uint8_t *)cdev->doorbells);
3439                                 bus_write_4(ha->pci_dbells, reg_addr,\
3440                                         qp->rq.iwarp_db2_data.raw);
3441                                 bus_barrier(ha->pci_dbells,  0, 0,\
3442                                         BUS_SPACE_BARRIER_READ);
3443                         }
3444
3445                         
3446                         mmiowb();
3447                         break;
3448                 case ECORE_ROCE_QP_STATE_ERR:
3449                         /* TBD:flush qps... */
3450                         break;
3451                 default:
3452                         /* invalid state change. */
3453                         status = -EINVAL;
3454                         break;
3455                 };
3456                 break;
3457         case ECORE_ROCE_QP_STATE_RTR:
3458                 /* RTR->XXX */
3459                 switch (new_state) {
3460                 case ECORE_ROCE_QP_STATE_RTS:
3461                         break;
3462                 case ECORE_ROCE_QP_STATE_ERR:
3463                         break;
3464                 default:
3465                         /* invalid state change. */
3466                         status = -EINVAL;
3467                         break;
3468                 };
3469                 break;
3470         case ECORE_ROCE_QP_STATE_RTS:
3471                 /* RTS->XXX */
3472                 switch (new_state) {
3473                 case ECORE_ROCE_QP_STATE_SQD:
3474                         break;
3475                 case ECORE_ROCE_QP_STATE_ERR:
3476                         break;
3477                 default:
3478                         /* invalid state change. */
3479                         status = -EINVAL;
3480                         break;
3481                 };
3482                 break;
3483         case ECORE_ROCE_QP_STATE_SQD:
3484                 /* SQD->XXX */
3485                 switch (new_state) {
3486                 case ECORE_ROCE_QP_STATE_RTS:
3487                 case ECORE_ROCE_QP_STATE_ERR:
3488                         break;
3489                 default:
3490                         /* invalid state change. */
3491                         status = -EINVAL;
3492                         break;
3493                 };
3494                 break;
3495         case ECORE_ROCE_QP_STATE_ERR:
3496                 /* ERR->XXX */
3497                 switch (new_state) {
3498                 case ECORE_ROCE_QP_STATE_RESET:
3499                         if ((qp->rq.prod != qp->rq.cons) ||
3500                             (qp->sq.prod != qp->sq.cons)) {
3501                                 QL_DPRINT11(ha,
3502                                         "Error->Reset with rq/sq "
3503                                         "not empty rq.prod=0x%x rq.cons=0x%x"
3504                                         " sq.prod=0x%x sq.cons=0x%x\n",
3505                                         qp->rq.prod, qp->rq.cons,
3506                                         qp->sq.prod, qp->sq.cons);
3507                                 status = -EINVAL;
3508                         }
3509                         break;
3510                 default:
3511                         status = -EINVAL;
3512                         break;
3513                 };
3514                 break;
3515         default:
3516                 status = -EINVAL;
3517                 break;
3518         };
3519
3520         QL_DPRINT12(ha, "exit\n");
3521         return status;
3522 }
3523
3524 int
3525 qlnxr_modify_qp(struct ib_qp    *ibqp,
3526         struct ib_qp_attr       *attr,
3527         int                     attr_mask,
3528         struct ib_udata         *udata)
3529 {
3530         int rc = 0;
3531         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3532         struct qlnxr_dev *dev = get_qlnxr_dev(&qp->dev->ibdev);
3533         struct ecore_rdma_modify_qp_in_params qp_params = { 0 };
3534         enum ib_qp_state old_qp_state, new_qp_state;
3535         struct ecore_rdma_device *qattr = ecore_rdma_query_device(dev->rdma_ctx);
3536         qlnx_host_t     *ha;
3537
3538         ha = dev->ha;
3539
3540         QL_DPRINT12(ha,
3541                 "enter qp = %p attr_mask = 0x%x, state = %d udata = %p\n",
3542                 qp, attr_mask, attr->qp_state, udata);
3543
3544         old_qp_state = qlnxr_get_ibqp_state(qp->state);
3545         if (attr_mask & IB_QP_STATE)
3546                 new_qp_state = attr->qp_state;
3547         else
3548                 new_qp_state = old_qp_state;
3549
3550         if (QLNX_IS_ROCE(dev)) {
3551 #if __FreeBSD_version >= 1100000
3552                 if (!ib_modify_qp_is_ok(old_qp_state,
3553                                         new_qp_state,
3554                                         ibqp->qp_type,
3555                                         attr_mask,
3556                                         IB_LINK_LAYER_ETHERNET)) {
3557                         QL_DPRINT12(ha,
3558                                 "invalid attribute mask=0x%x"
3559                                 " specified for qpn=0x%x of type=0x%x \n"
3560                                 " old_qp_state=0x%x, new_qp_state=0x%x\n",
3561                                 attr_mask, qp->qp_id, ibqp->qp_type,
3562                                 old_qp_state, new_qp_state);
3563                         rc = -EINVAL;
3564                         goto err;
3565                 }
3566 #else
3567                 if (!ib_modify_qp_is_ok(old_qp_state,
3568                                         new_qp_state,
3569                                         ibqp->qp_type,
3570                                         attr_mask )) {
3571                         QL_DPRINT12(ha,
3572                                 "invalid attribute mask=0x%x"
3573                                 " specified for qpn=0x%x of type=0x%x \n"
3574                                 " old_qp_state=0x%x, new_qp_state=0x%x\n",
3575                                 attr_mask, qp->qp_id, ibqp->qp_type,
3576                                 old_qp_state, new_qp_state);
3577                         rc = -EINVAL;
3578                         goto err;
3579                 }
3580
3581 #endif /* #if __FreeBSD_version >= 1100000 */
3582         }
3583         /* translate the masks... */
3584         if (attr_mask & IB_QP_STATE) {
3585                 SET_FIELD(qp_params.modify_flags,
3586                           ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
3587                 qp_params.new_state = qlnxr_get_state_from_ibqp(attr->qp_state);
3588         }
3589
3590         // TBD consider changing ecore to be a flag as well...
3591         if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
3592                 qp_params.sqd_async = true;
3593
3594         if (attr_mask & IB_QP_PKEY_INDEX) {
3595                 SET_FIELD(qp_params.modify_flags,
3596                           ECORE_ROCE_MODIFY_QP_VALID_PKEY,
3597                           1);
3598                 if (attr->pkey_index >= QLNXR_ROCE_PKEY_TABLE_LEN) {
3599                         rc = -EINVAL;
3600                         goto err;
3601                 }
3602
3603                 qp_params.pkey = QLNXR_ROCE_PKEY_DEFAULT;
3604         }
3605
3606         if (attr_mask & IB_QP_QKEY) {
3607                 qp->qkey = attr->qkey;
3608         }
3609
3610         /* tbd consider splitting in ecore.. */
3611         if (attr_mask & IB_QP_ACCESS_FLAGS) {
3612                 SET_FIELD(qp_params.modify_flags,
3613                           ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
3614                 qp_params.incoming_rdma_read_en =
3615                         attr->qp_access_flags & IB_ACCESS_REMOTE_READ;
3616                 qp_params.incoming_rdma_write_en =
3617                         attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE;
3618                 qp_params.incoming_atomic_en =
3619                         attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC;
3620         }
3621
3622         if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
3623                 if (attr_mask & IB_QP_PATH_MTU) {
3624                         if (attr->path_mtu < IB_MTU_256 ||
3625                             attr->path_mtu > IB_MTU_4096) {
3626                                 QL_DPRINT12(ha,
3627                                         "Only MTU sizes of 256, 512, 1024,"
3628                                         " 2048 and 4096 are supported "
3629                                         " attr->path_mtu = [%d]\n",
3630                                         attr->path_mtu);
3631
3632                                 rc = -EINVAL;
3633                                 goto err;
3634                         }
3635                         qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
3636                                       ib_mtu_enum_to_int(
3637                                                 iboe_get_mtu(dev->ha->ifp->if_mtu)));
3638                 }
3639
3640                 if (qp->mtu == 0) {
3641                         qp->mtu = ib_mtu_enum_to_int(
3642                                         iboe_get_mtu(dev->ha->ifp->if_mtu));
3643                         QL_DPRINT12(ha, "fixing zeroed MTU to qp->mtu = %d\n",
3644                                 qp->mtu);
3645                 }
3646
3647                 SET_FIELD(qp_params.modify_flags,
3648                           ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR,
3649                           1);
3650
3651                 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
3652                 qp_params.flow_label = attr->ah_attr.grh.flow_label;
3653                 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
3654
3655                 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
3656
3657                 get_gid_info(ibqp, attr, attr_mask, dev, qp, &qp_params);
3658
3659                 rc = qlnxr_get_dmac(dev, &attr->ah_attr, qp_params.remote_mac_addr);
3660                 if (rc)
3661                         return rc;
3662
3663                 qp_params.use_local_mac = true;
3664                 memcpy(qp_params.local_mac_addr, dev->ha->primary_mac, ETH_ALEN);
3665
3666                 QL_DPRINT12(ha, "dgid=0x%x:0x%x:0x%x:0x%x\n",
3667                        qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
3668                        qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
3669                 QL_DPRINT12(ha, "sgid=0x%x:0x%x:0x%x:0x%x\n",
3670                        qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
3671                        qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
3672                 QL_DPRINT12(ha,
3673                         "remote_mac=[0x%x:0x%x:0x%x:0x%x:0x%x:0x%x]\n",
3674                         qp_params.remote_mac_addr[0],
3675                         qp_params.remote_mac_addr[1],
3676                         qp_params.remote_mac_addr[2],
3677                         qp_params.remote_mac_addr[3],
3678                         qp_params.remote_mac_addr[4],
3679                         qp_params.remote_mac_addr[5]);
3680
3681                 qp_params.mtu = qp->mtu;
3682         }
3683
3684         if (qp_params.mtu == 0) {
3685                 /* stay with current MTU */
3686                 if (qp->mtu) {
3687                         qp_params.mtu = qp->mtu;
3688                 } else {
3689                         qp_params.mtu = ib_mtu_enum_to_int(
3690                                                 iboe_get_mtu(dev->ha->ifp->if_mtu));
3691                 }
3692         }
3693
3694         if (attr_mask & IB_QP_TIMEOUT) {
3695                 SET_FIELD(qp_params.modify_flags, \
3696                         ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
3697
3698                 qp_params.ack_timeout = attr->timeout;
3699                 if (attr->timeout) {
3700                         u32 temp;
3701
3702                         /* 12.7.34 LOCAL ACK TIMEOUT
3703                          * Value representing the transport (ACK) timeout for
3704                          * use by the remote, expressed as
3705                          * (4.096 usec * 2^(Local ACK Timeout)).
3706                          */
3707                         /* We use 1UL since the intermediate value may
3708                          * overflow 32 bits.
3709                          */
3710                         temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
3711                         qp_params.ack_timeout = temp; /* FW requires [msec] */
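                             /*
                              * Illustrative example: attr->timeout = 14 gives
                              * 4096 * 2^14 / 10^6 ~= 67, i.e. roughly a 67 msec
                              * ACK timeout handed to the firmware.
                              */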
3712                 }
3713                 else
3714                         qp_params.ack_timeout = 0; /* infinite */
3715         }
3716         if (attr_mask & IB_QP_RETRY_CNT) {
3717                 SET_FIELD(qp_params.modify_flags,\
3718                          ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
3719                 qp_params.retry_cnt = attr->retry_cnt;
3720         }
3721
3722         if (attr_mask & IB_QP_RNR_RETRY) {
3723                 SET_FIELD(qp_params.modify_flags,
3724                           ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT,
3725                           1);
3726                 qp_params.rnr_retry_cnt = attr->rnr_retry;
3727         }
3728
3729         if (attr_mask & IB_QP_RQ_PSN) {
3730                 SET_FIELD(qp_params.modify_flags,
3731                           ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN,
3732                           1);
3733                 qp_params.rq_psn = attr->rq_psn;
3734                 qp->rq_psn = attr->rq_psn;
3735         }
3736
3737         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
3738                 if (attr->max_rd_atomic > qattr->max_qp_req_rd_atomic_resc) {
3739                         rc = -EINVAL;
3740                         QL_DPRINT12(ha,
3741                                 "unsupported  max_rd_atomic=%d, supported=%d\n",
3742                                 attr->max_rd_atomic,
3743                                 qattr->max_qp_req_rd_atomic_resc);
3744                         goto err;
3745                 }
3746
3747                 SET_FIELD(qp_params.modify_flags,
3748                           ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ,
3749                           1);
3750                 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
3751         }
3752
3753         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
3754                 SET_FIELD(qp_params.modify_flags,
3755                           ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER,
3756                           1);
3757                 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
3758         }
3759
3760         if (attr_mask & IB_QP_SQ_PSN) {
3761                 SET_FIELD(qp_params.modify_flags,
3762                           ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN,
3763                           1);
3764                 qp_params.sq_psn = attr->sq_psn;
3765                 qp->sq_psn = attr->sq_psn;
3766         }
3767
3768         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
3769                 if (attr->max_dest_rd_atomic >
3770                     qattr->max_qp_resp_rd_atomic_resc) {
3771                         QL_DPRINT12(ha,
3772                                 "unsupported max_dest_rd_atomic=%d, "
3773                                 "supported=%d\n",
3774                                 attr->max_dest_rd_atomic,
3775                                 qattr->max_qp_resp_rd_atomic_resc);
3776
3777                         rc = -EINVAL;
3778                         goto err;
3779                 }
3780
3781                 SET_FIELD(qp_params.modify_flags,
3782                           ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP,
3783                           1);
3784                 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
3785         }
3786
3787         if (attr_mask & IB_QP_DEST_QPN) {
3788                 SET_FIELD(qp_params.modify_flags,
3789                           ECORE_ROCE_MODIFY_QP_VALID_DEST_QP,
3790                           1);
3791
3792                 qp_params.dest_qp = attr->dest_qp_num;
3793                 qp->dest_qp_num = attr->dest_qp_num;
3794         }
3795
3796         /*
3797          * Update the QP state before the actual ramrod to prevent a race with
3798          * fast path. Modifying the QP state to error will cause the device to
3799          * flush the CQEs, and polling the flushed CQEs would be considered a
3800          * potential issue if the QP isn't in error state.
3801          */
3802         if ((attr_mask & IB_QP_STATE) && (qp->qp_type != IB_QPT_GSI) &&
3803                 (!udata) && (qp_params.new_state == ECORE_ROCE_QP_STATE_ERR))
3804                 qp->state = ECORE_ROCE_QP_STATE_ERR;
3805
3806         if (qp->qp_type != IB_QPT_GSI)
3807                 rc = ecore_rdma_modify_qp(dev->rdma_ctx, qp->ecore_qp, &qp_params);
3808
3809         if (attr_mask & IB_QP_STATE) {
3810                 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
3811                         rc = qlnxr_update_qp_state(dev, qp, qp_params.new_state);
3812                 qp->state = qp_params.new_state;
3813         }
3814
3815 err:
3816         QL_DPRINT12(ha, "exit\n");
3817         return rc;
3818 }
3819
3820 static int
3821 qlnxr_to_ib_qp_acc_flags(struct ecore_rdma_query_qp_out_params *params)
3822 {
3823         int ib_qp_acc_flags = 0;
3824
3825         if (params->incoming_rdma_write_en)
3826                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
3827         if (params->incoming_rdma_read_en)
3828                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
3829         if (params->incoming_atomic_en)
3830                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
3831         if (true) /* FIXME -> local write ?? */
3832                 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
3833
3834         return ib_qp_acc_flags;
3835 }
3836
3837 static enum ib_mtu
3838 qlnxr_mtu_int_to_enum(u16 mtu)
3839 {
3840         enum ib_mtu ib_mtu_size;
3841
3842         switch (mtu) {
3843         case 256:
3844                 ib_mtu_size = IB_MTU_256;
3845                 break;
3846
3847         case 512:
3848                 ib_mtu_size = IB_MTU_512;
3849                 break;
3850
3851         case 1024:
3852                 ib_mtu_size = IB_MTU_1024;
3853                 break;
3854
3855         case 2048:
3856                 ib_mtu_size = IB_MTU_2048;
3857                 break;
3858
3859         case 4096:
3860                 ib_mtu_size = IB_MTU_4096;
3861                 break;
3862
3863         default:
3864                 ib_mtu_size = IB_MTU_1024;
3865                 break;
3866         }
3867         return (ib_mtu_size);
3868 }
3869
3870 int
3871 qlnxr_query_qp(struct ib_qp *ibqp,
3872         struct ib_qp_attr *qp_attr,
3873         int attr_mask,
3874         struct ib_qp_init_attr *qp_init_attr)
3875 {
3876         int rc = 0;
3877         struct ecore_rdma_query_qp_out_params params;
3878         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3879         struct qlnxr_dev *dev = qp->dev;
3880         qlnx_host_t     *ha;
3881
3882         ha = dev->ha;
3883
3884         QL_DPRINT12(ha, "enter\n");
3885
3886         memset(&params, 0, sizeof(params));
3887
3888         rc = ecore_rdma_query_qp(dev->rdma_ctx, qp->ecore_qp, &params);
3889         if (rc)
3890                 goto err;
3891
3892         memset(qp_attr, 0, sizeof(*qp_attr));
3893         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3894
3895         qp_attr->qp_state = qlnxr_get_ibqp_state(params.state);
3896         qp_attr->cur_qp_state = qlnxr_get_ibqp_state(params.state);
3897
3898         /* In some cases in iWARP qelr will ask for the state only */
3899         if (QLNX_IS_IWARP(dev) && (attr_mask == IB_QP_STATE)) {
3900                 QL_DPRINT11(ha, "only state requested\n");
3901                 return 0;
3902         }
3903
3904         qp_attr->path_mtu = qlnxr_mtu_int_to_enum(params.mtu);
3905         qp_attr->path_mig_state = IB_MIG_MIGRATED;
3906         qp_attr->rq_psn = params.rq_psn;
3907         qp_attr->sq_psn = params.sq_psn;
3908         qp_attr->dest_qp_num = params.dest_qp;
3909
3910         qp_attr->qp_access_flags = qlnxr_to_ib_qp_acc_flags(&params);
3911
3912         QL_DPRINT12(ha, "qp_state = 0x%x cur_qp_state = 0x%x "
3913                 "path_mtu = %d qp_access_flags = 0x%x\n",
3914                 qp_attr->qp_state, qp_attr->cur_qp_state, qp_attr->path_mtu,
3915                 qp_attr->qp_access_flags);
3916
3917         qp_attr->cap.max_send_wr = qp->sq.max_wr;
3918         qp_attr->cap.max_recv_wr = qp->rq.max_wr;
3919         qp_attr->cap.max_send_sge = qp->sq.max_sges;
3920         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
3921         qp_attr->cap.max_inline_data = qp->max_inline_data;
3922         qp_init_attr->cap = qp_attr->cap;
3923
3924         memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
3925                sizeof(qp_attr->ah_attr.grh.dgid.raw));
3926
3927         qp_attr->ah_attr.grh.flow_label = params.flow_label;
3928         qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
3929         qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
3930         qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
3931
3932         qp_attr->ah_attr.ah_flags = IB_AH_GRH;
3933         qp_attr->ah_attr.port_num = 1; /* FIXME -> check this */
3934         qp_attr->ah_attr.sl = 0;/* FIXME -> check this */
3935         qp_attr->timeout = params.timeout;
3936         qp_attr->rnr_retry = params.rnr_retry;
3937         qp_attr->retry_cnt = params.retry_cnt;
3938         qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
3939         qp_attr->pkey_index = params.pkey_index;
3940         qp_attr->port_num = 1; /* FIXME -> check this */
3941         qp_attr->ah_attr.src_path_bits = 0;
3942         qp_attr->ah_attr.static_rate = 0;
3943         qp_attr->alt_pkey_index = 0;
3944         qp_attr->alt_port_num = 0;
3945         qp_attr->alt_timeout = 0;
3946         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
3947
3948         qp_attr->sq_draining = (params.state == ECORE_ROCE_QP_STATE_SQD) ? 1 : 0;
3949         qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
3950         qp_attr->max_rd_atomic = params.max_rd_atomic;
3951         qp_attr->en_sqd_async_notify = (params.sqd_async)? 1 : 0;
3952
3953         QL_DPRINT12(ha, "max_inline_data=%d\n",
3954                 qp_attr->cap.max_inline_data);
3955
3956 err:
3957         QL_DPRINT12(ha, "exit\n");
3958         return rc;
3959 }
3960
3961 static void
3962 qlnxr_cleanup_user(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
3963 {
3964         qlnx_host_t     *ha;
3965
3966         ha = dev->ha;
3967
3968         QL_DPRINT12(ha, "enter\n");
3969
3970         if (qp->usq.umem)
3971                 ib_umem_release(qp->usq.umem);
3972
3973         qp->usq.umem = NULL;
3974
3975         if (qp->urq.umem)
3976                 ib_umem_release(qp->urq.umem);
3977
3978         qp->urq.umem = NULL;
3979
3980         QL_DPRINT12(ha, "exit\n");
3981         return;
3982 }
3983
3984 static void
3985 qlnxr_cleanup_kernel(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
3986 {
3987         qlnx_host_t     *ha;
3988
3989         ha = dev->ha;
3990
3991         QL_DPRINT12(ha, "enter\n");
3992
3993         if (qlnxr_qp_has_sq(qp)) {
3994                 QL_DPRINT12(ha, "freeing SQ\n");
3995                 ha->qlnxr_debug = 1;
3996 //              ecore_chain_free(dev->cdev, &qp->sq.pbl);
3997                 ha->qlnxr_debug = 0;
3998                 kfree(qp->wqe_wr_id);
3999         }
4000
4001         if (qlnxr_qp_has_rq(qp)) {
4002                 QL_DPRINT12(ha, "freeing RQ\n");
4003                 ha->qlnxr_debug = 1;
4004         //      ecore_chain_free(dev->cdev, &qp->rq.pbl);
4005                 ha->qlnxr_debug = 0;
4006                 kfree(qp->rqe_wr_id);
4007         }
4008
4009         QL_DPRINT12(ha, "exit\n");
4010         return;
4011 }
4012
4013 int
4014 qlnxr_free_qp_resources(struct qlnxr_dev *dev,
4015         struct qlnxr_qp *qp)
4016 {
4017         int             rc = 0;
4018         qlnx_host_t     *ha;
4019         struct ecore_rdma_destroy_qp_out_params d_out_params;
4020
4021         ha = dev->ha;
4022
4023         QL_DPRINT12(ha, "enter\n");
4024
4025 #if 0
4026         if (qp->qp_type != IB_QPT_GSI) {
4027                 rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp,
4028                                 &d_out_params);
4029                 if (rc)
4030                         return rc;
4031         }
4032
4033         if (qp->ibqp.uobject && qp->ibqp.uobject->context)
4034                 qlnxr_cleanup_user(dev, qp);
4035         else
4036                 qlnxr_cleanup_kernel(dev, qp);
4037 #endif
4038
4039         if (qp->ibqp.uobject && qp->ibqp.uobject->context)
4040                 qlnxr_cleanup_user(dev, qp);
4041         else
4042                 qlnxr_cleanup_kernel(dev, qp);
4043
4044         if (qp->qp_type != IB_QPT_GSI) {
4045                 rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp,
4046                                 &d_out_params);
4047                 if (rc)
4048                         return rc;
4049         }
4050
4051         QL_DPRINT12(ha, "exit\n");
4052         return 0;
4053 }
4054
4055 int
4056 qlnxr_destroy_qp(struct ib_qp *ibqp)
4057 {
4058         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
4059         struct qlnxr_dev *dev = qp->dev;
4060         int rc = 0;
4061         struct ib_qp_attr attr;
4062         int attr_mask = 0;
4063         qlnx_host_t     *ha;
4064
4065         ha = dev->ha;
4066
4067         QL_DPRINT12(ha, "enter qp = %p, qp_type=%d\n", qp, qp->qp_type);
4068
4069         qp->destroyed = 1;
4070
4071         if (QLNX_IS_ROCE(dev) && (qp->state != ECORE_ROCE_QP_STATE_RESET) &&
4072             (qp->state != ECORE_ROCE_QP_STATE_ERR) &&
4073             (qp->state != ECORE_ROCE_QP_STATE_INIT)) {
4074                 attr.qp_state = IB_QPS_ERR;
4075                 attr_mask |= IB_QP_STATE;
4076
4077                 /* change the QP state to ERROR */
4078                 qlnxr_modify_qp(ibqp, &attr, attr_mask, NULL);
4079         }
4080
4081         if (qp->qp_type == IB_QPT_GSI)
4082                 qlnxr_destroy_gsi_qp(dev);
4083
4084         qp->sig = ~qp->sig;
4085
4086         qlnxr_free_qp_resources(dev, qp);
4087
4088         if (atomic_dec_and_test(&qp->refcnt)) {
4089                 /* TODO: only for iWARP? */
4090                 qlnxr_idr_remove(dev, qp->qp_id);
4091                 kfree(qp);
4092         }
4093
4094         QL_DPRINT12(ha, "exit\n");
4095         return rc;
4096 }
4097
4098 static inline int
4099 qlnxr_wq_is_full(struct qlnxr_qp_hwq_info *wq)
4100 {
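             /*
              * Ring-full convention: one slot is always left unused, so the
              * queue is considered full when advancing prod by one would make
              * it equal to cons.
              */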
4101         return (((wq->prod + 1) % wq->max_wr) == wq->cons);
4102 }
4103
4104 static int
4105 sge_data_len(struct ib_sge *sg_list, int num_sge)
4106 {
4107         int i, len = 0;
4108         for (i = 0; i < num_sge; i++)
4109                 len += sg_list[i].length;
4110         return len;
4111 }
4112
4113 static void
4114 swap_wqe_data64(u64 *p)
4115 {
4116         int i;
4117
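             /*
              * cpu_to_be64(cpu_to_le64(x)) amounts to an unconditional 64-bit
              * byte swap written portably: on a little-endian host
              * cpu_to_le64() is a no-op and cpu_to_be64() swaps, while on a
              * big-endian host the roles are reversed. It is applied to
              * fully-copied inline data segments before they reach the device.
              */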
4118         for (i = 0; i < QLNXR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
4119                 *p = cpu_to_be64(cpu_to_le64(*p));
4120 }
4121
4122 static u32
4123 qlnxr_prepare_sq_inline_data(struct qlnxr_dev *dev,
4124         struct qlnxr_qp         *qp,
4125         u8                      *wqe_size,
4126         struct ib_send_wr       *wr,
4127         struct ib_send_wr       **bad_wr,
4128         u8                      *bits,
4129         u8                      bit)
4130 {
4131         int i, seg_siz;
4132         char *seg_prt, *wqe;
4133         u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
4134         qlnx_host_t     *ha;
4135
4136         ha = dev->ha;
4137
4138         QL_DPRINT12(ha, "enter[%d]\n", data_size);
4139
4140         if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
4141                 QL_DPRINT12(ha,
4142                         "Too much inline data in WR:[%d, %d]\n",
4143                         data_size, ROCE_REQ_MAX_INLINE_DATA_SIZE);
4144                 *bad_wr = wr;
4145                 return 0;
4146         }
4147
4148         if (!data_size)
4149                 return data_size;
4150
4151         /* set the bit */
4152         *bits |= bit;
4153
4154         seg_prt = wqe = NULL;
4155         seg_siz = 0;
4156
4157         /* copy data inline */
4158         for (i = 0; i < wr->num_sge; i++) {
4159                 u32 len = wr->sg_list[i].length;
4160                 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
4161
4162                 while (len > 0) {
4163                         u32 cur;
4164
4165                         /* new segment required */
4166                         if (!seg_siz) {
4167                                 wqe = (char *)ecore_chain_produce(&qp->sq.pbl);
4168                                 seg_prt = wqe;
4169                                 seg_siz = sizeof(struct rdma_sq_common_wqe);
4170                                 (*wqe_size)++;
4171                         }
4172
4173                         /* calculate currently allowed length */
4174                         cur = MIN(len, seg_siz);
4175
4176                         memcpy(seg_prt, src, cur);
4177
4178                         /* update segment variables */
4179                         seg_prt += cur;
4180                         seg_siz -= cur;
4181                         /* update sge variables */
4182                         src += cur;
4183                         len -= cur;
4184
4185                         /* swap fully-completed segments */
4186                         if (!seg_siz)
4187                                 swap_wqe_data64((u64 *)wqe);
4188                 }
4189         }
4190
4191         /* swap last not completed segment */
4192         if (seg_siz)
4193                 swap_wqe_data64((u64 *)wqe);
4194
4195         QL_DPRINT12(ha, "exit\n");
4196         return data_size;
4197 }
4198
4199 static u32
4200 qlnxr_prepare_sq_sges(struct qlnxr_dev *dev, struct qlnxr_qp *qp,
4201         u8 *wqe_size, struct ib_send_wr *wr)
4202 {
4203         int i;
4204         u32 data_size = 0;
4205         qlnx_host_t     *ha;
4206
4207         ha = dev->ha;
4208
4209         QL_DPRINT12(ha, "enter wr->num_sge = %d \n", wr->num_sge);
4210
4211         for (i = 0; i < wr->num_sge; i++) {
4212                 struct rdma_sq_sge *sge = ecore_chain_produce(&qp->sq.pbl);
4213
4214                 TYPEPTR_ADDR_SET(sge, addr, wr->sg_list[i].addr);
4215                 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
4216                 sge->length = cpu_to_le32(wr->sg_list[i].length);
4217                 data_size += wr->sg_list[i].length;
4218         }
4219
4220         if (wqe_size)
4221                 *wqe_size += wr->num_sge;
4222
4223         QL_DPRINT12(ha, "exit data_size = %d\n", data_size);
4224         return data_size;
4225 }
4226
4227 static u32
4228 qlnxr_prepare_sq_rdma_data(struct qlnxr_dev *dev,
4229         struct qlnxr_qp *qp,
4230         struct rdma_sq_rdma_wqe_1st *rwqe,
4231         struct rdma_sq_rdma_wqe_2nd *rwqe2,
4232         struct ib_send_wr *wr,
4233         struct ib_send_wr **bad_wr)
4234 {
4235         qlnx_host_t     *ha;
4236         u32             ret = 0;
4237
4238         ha = dev->ha;
4239
4240         QL_DPRINT12(ha, "enter\n");
4241
4242         rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
4243         TYPEPTR_ADDR_SET(rwqe2, remote_va, rdma_wr(wr)->remote_addr);
4244
4245         if (wr->send_flags & IB_SEND_INLINE) {
4246                 u8 flags = 0;
4247                 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
4248                 return qlnxr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size,
4249                                 wr, bad_wr, &rwqe->flags, flags);
4250         }
4251
4252         ret = qlnxr_prepare_sq_sges(dev, qp, &rwqe->wqe_size, wr);
4253
4254         QL_DPRINT12(ha, "exit ret = 0x%x\n", ret);
4255
4256         return (ret);
4257 }
4258
4259 static u32
4260 qlnxr_prepare_sq_send_data(struct qlnxr_dev *dev,
4261         struct qlnxr_qp *qp,
4262         struct rdma_sq_send_wqe *swqe,
4263         struct rdma_sq_send_wqe *swqe2,
4264         struct ib_send_wr *wr,
4265         struct ib_send_wr **bad_wr)
4266 {
4267         qlnx_host_t     *ha;
4268         u32             ret = 0;
4269
4270         ha = dev->ha;
4271
4272         QL_DPRINT12(ha, "enter\n");
4273
4274         memset(swqe2, 0, sizeof(*swqe2));
4275
4276         if (wr->send_flags & IB_SEND_INLINE) {
4277                 u8 flags = 0;
4278                 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
4279                 return qlnxr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size,
4280                                 wr, bad_wr, &swqe->flags, flags);
4281         }
4282
4283         ret = qlnxr_prepare_sq_sges(dev, qp, &swqe->wqe_size, wr);
4284
4285         QL_DPRINT12(ha, "exit ret = 0x%x\n", ret);
4286
4287         return (ret);
4288 }
4289
4290 static void
4291 qlnx_handle_completed_mrs(struct qlnxr_dev *dev, struct mr_info *info)
4292 {
4293         qlnx_host_t     *ha;
4294
4295         ha = dev->ha;
4296
4297         int work = info->completed - info->completed_handled - 1;
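             /*
              * The "- 1" appears intended to leave the PBL of the most recent
              * completion on the in-use list for now (see the loop below).
              */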
4298
4299         QL_DPRINT12(ha, "enter [%d]\n", work);
4300
4301         while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
4302                 struct qlnxr_pbl *pbl;
4303
4304                 /* Free all the page lists that can be freed (all the ones
4305                  * that were invalidated), under the assumption that if an
4306                  * FMR completed successfully, any invalidate operation
4307                  * issued before it has also completed.
4308                  */
4309                 pbl = list_first_entry(&info->inuse_pbl_list,
4310                                        struct qlnxr_pbl,
4311                                        list_entry);
4312                 list_del(&pbl->list_entry);
4313                 list_add_tail(&pbl->list_entry, &info->free_pbl_list);
4314                 info->completed_handled++;
4315         }
4316
4317         QL_DPRINT12(ha, "exit\n");
4318         return;
4319 }
4320
4321 #if __FreeBSD_version >= 1102000
4322
4323 static int qlnxr_prepare_reg(struct qlnxr_qp *qp,
4324                 struct rdma_sq_fmr_wqe_1st *fwqe1,
4325                 struct ib_reg_wr *wr)
4326 {
4327         struct qlnxr_mr *mr = get_qlnxr_mr(wr->mr);
4328         struct rdma_sq_fmr_wqe_2nd *fwqe2;
4329
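             /*
              * fwqe1 is supplied by the caller; fill it with the IOVA and
              * l_key below, and produce a second SQ chain element for the
              * access flags, page-size log and PBL address of the MR.
              */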
4330         fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)ecore_chain_produce(&qp->sq.pbl);
4331         fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
4332         fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
4333         fwqe1->l_key = wr->key;
4334
4335         fwqe2->access_ctrl = 0;
4336
4337         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
4338                 !!(wr->access & IB_ACCESS_REMOTE_READ));
4339         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
4340                 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
4341         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
4342                 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
4343         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
4344         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
4345                 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
4346         fwqe2->fmr_ctrl = 0;
4347
4348         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
4349                 ilog2(mr->ibmr.page_size) - 12);
4350
4351         fwqe2->length_hi = 0; /* TODO - figure out why length is only 32bit.. */
4352         fwqe2->length_lo = mr->ibmr.length;
4353         fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
4354         fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
4355
4356         qp->wqe_wr_id[qp->sq.prod].mr = mr;
4357
4358         return 0;
4359 }
4360
4361 #else
4362
4363 static void
4364 build_frmr_pbes(struct qlnxr_dev *dev, struct ib_send_wr *wr,
4365         struct mr_info *info)
4366 {
4367         int i;
4368         u64 buf_addr = 0;
4369         int num_pbes, total_num_pbes = 0;
4370         struct regpair *pbe;
4371         struct qlnxr_pbl *pbl_tbl = info->pbl_table;
4372         struct qlnxr_pbl_info *pbl_info = &info->pbl_info;
4373         qlnx_host_t     *ha;
4374
4375         ha = dev->ha;
4376
4377         QL_DPRINT12(ha, "enter\n");
4378
4379         pbe = (struct regpair *)pbl_tbl->va;
4380         num_pbes = 0;
4381
4382         for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
4383                 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
4384                 pbe->lo = cpu_to_le32((u32)buf_addr);
4385                 pbe->hi = cpu_to_le32((u32)upper_32_bits(buf_addr));
4386
4387                 num_pbes += 1;
4388                 pbe++;
4389                 total_num_pbes++;
4390
4391                 if (total_num_pbes == pbl_info->num_pbes)
4392                         return;
4393
4394                 /* if the given pbl is full of pbes,
4395                  * move to the next pbl.
4396                  */
4397                 if (num_pbes ==
4398                     (pbl_info->pbl_size / sizeof(u64))) {
4399                         pbl_tbl++;
4400                         pbe = (struct regpair *)pbl_tbl->va;
4401                         num_pbes = 0;
4402                 }
4403         }
4404         QL_DPRINT12(ha, "exit\n");
4405
4406         return;
4407 }
4408
4409 static int
4410 qlnxr_prepare_safe_pbl(struct qlnxr_dev *dev, struct mr_info *info)
4411 {
4412         int rc = 0;
4413         qlnx_host_t     *ha;
4414
4415         ha = dev->ha;
4416
4417         QL_DPRINT12(ha, "enter\n");
4418
4419         if (info->completed == 0) {
4420                 //DP_VERBOSE(dev, QLNXR_MSG_MR, "First FMR\n");
4421                 /* first fmr */
4422                 return 0;
4423         }
4424
4425         qlnx_handle_completed_mrs(dev, info);
4426
4427         list_add_tail(&info->pbl_table->list_entry, &info->inuse_pbl_list);
4428
4429         if (list_empty(&info->free_pbl_list)) {
4430                 info->pbl_table = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info,
4431                                                           GFP_ATOMIC);
4432         } else {
4433                 info->pbl_table = list_first_entry(&info->free_pbl_list,
4434                                         struct qlnxr_pbl,
4435                                         list_entry);
4436                 list_del(&info->pbl_table->list_entry);
4437         }
4438
4439         if (!info->pbl_table)
4440                 rc = -ENOMEM;
4441
4442         QL_DPRINT12(ha, "exit\n");
4443         return rc;
4444 }
4445
4446 static inline int
4447 qlnxr_prepare_fmr(struct qlnxr_qp *qp,
4448         struct rdma_sq_fmr_wqe_1st *fwqe1,
4449         struct ib_send_wr *wr)
4450 {
4451         struct qlnxr_dev *dev = qp->dev;
4452         u64 fbo;
4453         struct qlnxr_fast_reg_page_list *frmr_list =
4454                 get_qlnxr_frmr_list(wr->wr.fast_reg.page_list);
4455         struct rdma_sq_fmr_wqe *fwqe2 =
4456                 (struct rdma_sq_fmr_wqe *)ecore_chain_produce(&qp->sq.pbl);
4457         int rc = 0;
4458         qlnx_host_t     *ha;
4459
4460         ha = dev->ha;
4461
4462         QL_DPRINT12(ha, "enter\n");
4463
4464         if (wr->wr.fast_reg.page_list_len == 0)
4465                 BUG();
4466
4467         rc = qlnxr_prepare_safe_pbl(dev, &frmr_list->info);
4468         if (rc)
4469                 return rc;
4470
4471         fwqe1->addr.hi = upper_32_bits(wr->wr.fast_reg.iova_start);
4472         fwqe1->addr.lo = lower_32_bits(wr->wr.fast_reg.iova_start);
4473         fwqe1->l_key = wr->wr.fast_reg.rkey;
4474
4475         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_REMOTE_READ,
4476                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ));
4477         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_REMOTE_WRITE,
4478                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE));
4479         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_ENABLE_ATOMIC,
4480                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_ATOMIC));
4481         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_LOCAL_READ, 1);
4482         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_LOCAL_WRITE,
4483                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE));
4484
4485         fwqe2->fmr_ctrl = 0;
4486
4487         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
4488                    ilog2(1 << wr->wr.fast_reg.page_shift) - 12);
4489         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_ZERO_BASED, 0);
4490
4491         fwqe2->length_hi = 0; /* Todo - figure this out... why length is only 32bit.. */
4492         fwqe2->length_lo = wr->wr.fast_reg.length;
4493         fwqe2->pbl_addr.hi = upper_32_bits(frmr_list->info.pbl_table->pa);
4494         fwqe2->pbl_addr.lo = lower_32_bits(frmr_list->info.pbl_table->pa);
4495
4496         /* produce another wqe for fwqe3 */
4497         ecore_chain_produce(&qp->sq.pbl);
4498
4499         fbo = wr->wr.fast_reg.iova_start -
4500             (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
4501
4502         QL_DPRINT12(ha, "wr.fast_reg.iova_start = %p rkey=%x addr=%x:%x"
4503                 " length = %x pbl_addr %x:%x\n",
4504                 wr->wr.fast_reg.iova_start, wr->wr.fast_reg.rkey,
4505                 fwqe1->addr.hi, fwqe1->addr.lo, fwqe2->length_lo,
4506                 fwqe2->pbl_addr.hi, fwqe2->pbl_addr.lo);
4507
4508         build_frmr_pbes(dev, wr, &frmr_list->info);
4509
4510         qp->wqe_wr_id[qp->sq.prod].frmr = frmr_list;
4511
4512         QL_DPRINT12(ha, "exit\n");
4513         return 0;
4514 }
4515
4516 #endif /* #if __FreeBSD_version >= 1102000 */
4517
4518 static enum ib_wc_opcode
4519 qlnxr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
4520 {
4521         switch (opcode) {
4522         case IB_WR_RDMA_WRITE:
4523         case IB_WR_RDMA_WRITE_WITH_IMM:
4524                 return IB_WC_RDMA_WRITE;
4525         case IB_WR_SEND_WITH_IMM:
4526         case IB_WR_SEND:
4527         case IB_WR_SEND_WITH_INV:
4528                 return IB_WC_SEND;
4529         case IB_WR_RDMA_READ:
4530                 return IB_WC_RDMA_READ;
4531         case IB_WR_ATOMIC_CMP_AND_SWP:
4532                 return IB_WC_COMP_SWAP;
4533         case IB_WR_ATOMIC_FETCH_AND_ADD:
4534                 return IB_WC_FETCH_ADD;
4535
4536 #if __FreeBSD_version >= 1102000
4537         case IB_WR_REG_MR:
4538                 return IB_WC_REG_MR;
4539 #else
4540         case IB_WR_FAST_REG_MR:
4541                 return IB_WC_FAST_REG_MR;
4542 #endif /* #if __FreeBSD_version >= 1102000 */
4543
4544         case IB_WR_LOCAL_INV:
4545                 return IB_WC_LOCAL_INV;
4546         default:
4547                 return IB_WC_SEND;
4548         }
4549 }
4550 static inline bool
4551 qlnxr_can_post_send(struct qlnxr_qp *qp, struct ib_send_wr *wr)
4552 {
4553         int wq_is_full, err_wr, pbl_is_full;
4554         struct qlnxr_dev *dev = qp->dev;
4555         qlnx_host_t     *ha;
4556
4557         ha = dev->ha;
4558
4559         QL_DPRINT12(ha, "enter[qp, wr] = [%p,%p]\n", qp, wr);
4560
4561         /* prevent SQ overflow and/or processing of a bad WR */
4562         err_wr = wr->num_sge > qp->sq.max_sges;
4563         wq_is_full = qlnxr_wq_is_full(&qp->sq);
4564         pbl_is_full = ecore_chain_get_elem_left_u32(&qp->sq.pbl) <
4565                       QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
4566         if (wq_is_full || err_wr || pbl_is_full) {
4567                 if (wq_is_full &&
4568                     !(qp->err_bitmap & QLNXR_QP_ERR_SQ_FULL)) {
4569                         qp->err_bitmap |= QLNXR_QP_ERR_SQ_FULL;
4570
4571                         QL_DPRINT12(ha,
4572                                 "error: WQ is full. Post send on QP failed"
4573                                 " (this error appears only once) "
4574                                 "[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4575                                 qp, wr, qp->err_bitmap);
4576                 }
4577
4578                 if (err_wr &&
4579                     !(qp->err_bitmap & QLNXR_QP_ERR_BAD_SR)) {
4580                         qp->err_bitmap |= QLNXR_QP_ERR_BAD_SR;
4581
4582                         QL_DPRINT12(ha,
4583                                 "error: WQ is bad. Post send on QP failed"
4584                                 " (this error appears only once) "
4585                                 "[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4586                                 qp, wr, qp->err_bitmap);
4587                 }
4588
4589                 if (pbl_is_full &&
4590                     !(qp->err_bitmap & QLNXR_QP_ERR_SQ_PBL_FULL)) {
4591                         qp->err_bitmap |= QLNXR_QP_ERR_SQ_PBL_FULL;
4592
4593                         QL_DPRINT12(ha,
4594                                 "error: WQ PBL is full. Post send on QP failed"
4595                                 " (this error appears only once) "
4596                                 "[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4597                                 qp, wr, qp->err_bitmap);
4598                 }
4599                 return false;
4600         }
4601         QL_DPRINT12(ha, "exit[qp, wr] = [%p,%p]\n", qp, wr);
4602         return true;
4603 }
4604
4605 int
4606 qlnxr_post_send(struct ib_qp *ibqp,
4607         struct ib_send_wr *wr,
4608         struct ib_send_wr **bad_wr)
4609 {
4610         struct qlnxr_dev        *dev = get_qlnxr_dev(ibqp->device);
4611         struct qlnxr_qp         *qp = get_qlnxr_qp(ibqp);
4612         unsigned long           flags;
4613         int                     status = 0, rc = 0;
4614         bool                    comp;
4615         qlnx_host_t             *ha;
4616         uint32_t                reg_addr;
4617
4618         *bad_wr = NULL;
4619         ha = dev->ha;
4620
4621         QL_DPRINT12(ha, "enter[ibqp, wr, bad_wr] = [%p, %p, %p]\n",
4622                 ibqp, wr, bad_wr);
4623
4624         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
4625                 return -EINVAL;
4626
4627         if (qp->qp_type == IB_QPT_GSI)
4628                 return qlnxr_gsi_post_send(ibqp, wr, bad_wr);
4629
4630         spin_lock_irqsave(&qp->q_lock, flags);
4631
4632         if (QLNX_IS_ROCE(dev) && (qp->state != ECORE_ROCE_QP_STATE_RTS) &&
4633             (qp->state != ECORE_ROCE_QP_STATE_ERR) &&
4634             (qp->state != ECORE_ROCE_QP_STATE_SQD)) {
4635                 spin_unlock_irqrestore(&qp->q_lock, flags);
4636                 *bad_wr = wr;
4637                 QL_DPRINT11(ha, "QP in wrong state! QP icid=0x%x state %d\n",
4638                         qp->icid, qp->state);
4639                 return -EINVAL;
4640         }
4641
4642         if (!wr) {
4643                 QL_DPRINT11(ha, "Got an empty post send???\n");
4644         }
4645
4646         while (wr) {
4647                 struct rdma_sq_common_wqe       *wqe;
4648                 struct rdma_sq_send_wqe         *swqe;
4649                 struct rdma_sq_send_wqe         *swqe2;
4650                 struct rdma_sq_rdma_wqe_1st     *rwqe;
4651                 struct rdma_sq_rdma_wqe_2nd     *rwqe2;
4652                 struct rdma_sq_local_inv_wqe    *iwqe;
4653                 struct rdma_sq_atomic_wqe       *awqe1;
4654                 struct rdma_sq_atomic_wqe       *awqe2;
4655                 struct rdma_sq_atomic_wqe       *awqe3;
4656                 struct rdma_sq_fmr_wqe_1st      *fwqe1;
4657
4658                 if (!qlnxr_can_post_send(qp, wr)) {
4659                         status = -ENOMEM;
4660                         *bad_wr = wr;
4661                         break;
4662                 }
4663
4664                 wqe = ecore_chain_produce(&qp->sq.pbl);
4665
4666                 qp->wqe_wr_id[qp->sq.prod].signaled =
4667                         !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
4668
4669                 /* common fields */
4670                 wqe->flags = 0;
4671                 wqe->flags |= (RDMA_SQ_SEND_WQE_COMP_FLG_MASK <<
4672                                 RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT);
4673
4674                 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG, \
4675                         !!(wr->send_flags & IB_SEND_SOLICITED));
4676
4677                 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) ||
4678                                 (qp->signaled);
4679
4680                 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
4681                 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,  \
4682                         !!(wr->send_flags & IB_SEND_FENCE));
4683
4684                 wqe->prev_wqe_size = qp->prev_wqe_size;
4685
4686                 qp->wqe_wr_id[qp->sq.prod].opcode = qlnxr_ib_to_wc_opcode(wr->opcode);
4687
4688                 switch (wr->opcode) {
4689                 case IB_WR_SEND_WITH_IMM:
4690
4691                         wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
4692                         swqe = (struct rdma_sq_send_wqe *)wqe;
4693                         swqe->wqe_size = 2;
4694                         swqe2 = (struct rdma_sq_send_wqe *)
4695                                         ecore_chain_produce(&qp->sq.pbl);
4696                         swqe->inv_key_or_imm_data =
4697                                 cpu_to_le32(wr->ex.imm_data);
4698                         swqe->length = cpu_to_le32(
4699                                                 qlnxr_prepare_sq_send_data(dev,
4700                                                         qp, swqe, swqe2, wr,
4701                                                         bad_wr));
4702
4703                         qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4704                         qp->prev_wqe_size = swqe->wqe_size;
4705                         qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4706
4707                         QL_DPRINT12(ha, "SEND w/ IMM length = %d imm data=%x\n",
4708                                 swqe->length, wr->ex.imm_data);
4709
4710                         break;
4711
4712                 case IB_WR_SEND:
4713
4714                         wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
4715                         swqe = (struct rdma_sq_send_wqe *)wqe;
4716
4717                         swqe->wqe_size = 2;
4718                         swqe2 = (struct rdma_sq_send_wqe *)
4719                                         ecore_chain_produce(&qp->sq.pbl);
4720                         swqe->length = cpu_to_le32(
4721                                                 qlnxr_prepare_sq_send_data(dev,
4722                                                         qp, swqe, swqe2, wr,
4723                                                         bad_wr));
4724                         qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4725                         qp->prev_wqe_size = swqe->wqe_size;
4726                         qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4727
4728                         QL_DPRINT12(ha, "SEND w/o IMM length = %d\n",
4729                                 swqe->length);
4730
4731                         break;
4732
4733                 case IB_WR_SEND_WITH_INV:
4734
4735                         wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
4736                         swqe = (struct rdma_sq_send_wqe *)wqe;
4737                         swqe2 = (struct rdma_sq_send_wqe *)
4738                                         ecore_chain_produce(&qp->sq.pbl);
4739                         swqe->wqe_size = 2;
4740                         swqe->inv_key_or_imm_data =
4741                                 cpu_to_le32(wr->ex.invalidate_rkey);
4742                         swqe->length = cpu_to_le32(qlnxr_prepare_sq_send_data(dev,
4743                                                 qp, swqe, swqe2, wr, bad_wr));
4744                         qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4745                         qp->prev_wqe_size = swqe->wqe_size;
4746                         qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4747
4748                         QL_DPRINT12(ha, "SEND w INVALIDATE length = %d\n",
4749                                 swqe->length);
4750                         break;
4751
4752                 case IB_WR_RDMA_WRITE_WITH_IMM:
4753
4754                         wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
4755                         rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4756
4757                         rwqe->wqe_size = 2;
4758                         rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
4759                         rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4760                                         ecore_chain_produce(&qp->sq.pbl);
4761                         rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4762                                                 qp, rwqe, rwqe2, wr, bad_wr));
4763                         qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4764                         qp->prev_wqe_size = rwqe->wqe_size;
4765                         qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4766
4767                         QL_DPRINT12(ha,
4768                                 "RDMA WRITE w/ IMM length = %d imm data=%x\n",
4769                                 rwqe->length, rwqe->imm_data);
4770
4771                         break;
4772
4773                 case IB_WR_RDMA_WRITE:
4774
4775                         wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
4776                         rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4777
4778                         rwqe->wqe_size = 2;
4779                         rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4780                                         ecore_chain_produce(&qp->sq.pbl);
4781                         rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4782                                                 qp, rwqe, rwqe2, wr, bad_wr));
4783                         qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4784                         qp->prev_wqe_size = rwqe->wqe_size;
4785                         qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4786
4787                         QL_DPRINT12(ha,
4788                                 "RDMA WRITE w/o IMM length = %d\n",
4789                                 rwqe->length);
4790
4791                         break;
4792
4793                 case IB_WR_RDMA_READ_WITH_INV:
4794
4795                         QL_DPRINT12(ha,
4796                                 "RDMA READ WITH INVALIDATE not supported\n");
4797
4798                         *bad_wr = wr;
4799                         rc = -EINVAL;
4800
4801                         break;
4802
4803                 case IB_WR_RDMA_READ:
4804
4805                         wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
4806                         rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4807
4808                         rwqe->wqe_size = 2;
4809                         rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4810                                         ecore_chain_produce(&qp->sq.pbl);
4811                         rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4812                                                 qp, rwqe, rwqe2, wr, bad_wr));
4813
4814                         qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4815                         qp->prev_wqe_size = rwqe->wqe_size;
4816                         qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4817
4818                         QL_DPRINT12(ha, "RDMA READ length = %d\n",
4819                                 rwqe->length);
4820
4821                         break;
4822
4823                 case IB_WR_ATOMIC_CMP_AND_SWP:
4824                 case IB_WR_ATOMIC_FETCH_AND_ADD:
4825
4826                         QL_DPRINT12(ha,
4827                                 "ATOMIC operation = %s\n",
4828                                 ((wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) ?
4829                                         "IB_WR_ATOMIC_CMP_AND_SWP" : 
4830                                         "IB_WR_ATOMIC_FETCH_AND_ADD"));
4831
4832                         awqe1 = (struct rdma_sq_atomic_wqe *)wqe;
4833                         awqe1->prev_wqe_size = 4;
4834
4835                         awqe2 = (struct rdma_sq_atomic_wqe *)
4836                                         ecore_chain_produce(&qp->sq.pbl);
4837
4838                         TYPEPTR_ADDR_SET(awqe2, remote_va, \
4839                                 atomic_wr(wr)->remote_addr);
4840
4841                         awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
4842
4843                         awqe3 = (struct rdma_sq_atomic_wqe *)
4844                                         ecore_chain_produce(&qp->sq.pbl);
4845
4846                         if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
4847                                 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
4848                                 TYPEPTR_ADDR_SET(awqe3, swap_data,
4849                                                  atomic_wr(wr)->compare_add);
4850                         } else {
4851                                 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
4852                                 TYPEPTR_ADDR_SET(awqe3, swap_data,
4853                                                  atomic_wr(wr)->swap);
4854                                 TYPEPTR_ADDR_SET(awqe3, cmp_data,
4855                                                  atomic_wr(wr)->compare_add);
4856                         }
4857
4858                         qlnxr_prepare_sq_sges(dev, qp, NULL, wr);
4859
4860                         qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->prev_wqe_size;
4861                         qp->prev_wqe_size = awqe1->prev_wqe_size;
4862
4863                         break;
4864
4865                 case IB_WR_LOCAL_INV:
4866
4867                         QL_DPRINT12(ha,
4868                                 "INVALIDATE length (IB_WR_LOCAL_INV)\n");
4869
4870                         iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
4871                         iwqe->prev_wqe_size = 1;
4872
4873                         iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
4874                         iwqe->inv_l_key = wr->ex.invalidate_rkey;
4875                         qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->prev_wqe_size;
4876                         qp->prev_wqe_size = iwqe->prev_wqe_size;
4877
4878                         break;
4879
4880 #if __FreeBSD_version >= 1102000
4881
4882                 case IB_WR_REG_MR:
4883
4884                         QL_DPRINT12(ha, "IB_WR_REG_MR\n");
4885
4886                         wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
4887                         fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
4888                         fwqe1->wqe_size = 2;
4889
4890                         rc = qlnxr_prepare_reg(qp, fwqe1, reg_wr(wr));
4891                         if (rc) {
4892                                 QL_DPRINT11(ha, "IB_WR_REG_MR failed rc=%d\n", rc);
4893                                 *bad_wr = wr;
4894                                 break;
4895                         }
4896
4897                         qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
4898                         qp->prev_wqe_size = fwqe1->wqe_size;
4899
4900                         break;
4901 #else
4902                 case IB_WR_FAST_REG_MR:
4903
4904                         QL_DPRINT12(ha, "FAST_MR (IB_WR_FAST_REG_MR)\n");
4905
4906                         wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
4907                         fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
4908                         fwqe1->prev_wqe_size = 3;
4909
4910                         rc = qlnxr_prepare_fmr(qp, fwqe1, wr);
4911
4912                         if (rc) {
4913                                 QL_DPRINT12(ha,
4914                                         "FAST_MR (IB_WR_FAST_REG_MR) failed"
4915                                         " rc = %d\n", rc);
4916                                 *bad_wr = wr;
4917                                 break;
4918                         }
4919
4920                         qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->prev_wqe_size;
4921                         qp->prev_wqe_size = fwqe1->prev_wqe_size;
4922
4923                         break;
4924 #endif /* #if __FreeBSD_version >= 1102000 */
4925
4926                 default:
4927
4928                         QL_DPRINT12(ha, "Invalid Opcode 0x%x!\n", wr->opcode);
4929
4930                         rc = -EINVAL;
4931                         *bad_wr = wr;
4932                         break;
4933                 }
4934
4935                 if (*bad_wr) {
4936                         /*
4937                          * restore prod to its position before this WR was processed
4938                          */
4939                         ecore_chain_set_prod(&qp->sq.pbl,
4940                              le16_to_cpu(qp->sq.db_data.data.value),
4941                              wqe);
4942                         /* restore prev_wqe_size */
4943                         qp->prev_wqe_size = wqe->prev_wqe_size;
4944                         status = rc;
4945
4946                         QL_DPRINT12(ha, "failed *bad_wr = %p\n", *bad_wr);
4947                         break; /* out of the loop */
4948                 }
4949
4950                 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
4951
4952                 qlnxr_inc_sw_prod(&qp->sq);
4953
4954                 qp->sq.db_data.data.value++;
4955
4956                 wr = wr->next;
4957         }
4958
4959         /* Trigger doorbell.
4960          * If there was a failure in the first WR then it will be triggered in
4961          * vain. However, this is not harmful (as long as the producer value is
4962          * unchanged). For performance reasons we avoid checking for this
4963          * redundant doorbell.
4964          */
4965         wmb();
4966         //writel(qp->sq.db_data.raw, qp->sq.db);
4967
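        /* Ring the SQ doorbell: compute the offset of this QP's doorbell
         * within the doorbell BAR and write the updated producer value via
         * bus_space (rather than a direct writel()). */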
4968         reg_addr = (uint32_t)((uint8_t *)qp->sq.db - (uint8_t *)ha->cdev.doorbells);
4969         bus_write_4(ha->pci_dbells, reg_addr, qp->sq.db_data.raw);
4970         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
4971
4972         mmiowb();
4973
4974         spin_unlock_irqrestore(&qp->q_lock, flags);
4975
4976         QL_DPRINT12(ha, "exit[ibqp, wr, bad_wr] = [%p, %p, %p]\n",
4977                 ibqp, wr, bad_wr);
4978
4979         return status;
4980 }
4981
4982 static u32
4983 qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq)
4984 {
4985         u32 used;
4986
4987         /* Calculate the number of elements in use from the producer
4988          * and consumer counts, and subtract it from the maximum
4989          * work requests supported to get the number of elements left.
4990          */
4991         used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
4992
4993         return hw_srq->max_wr - used;
4994 }
4995
4996 int
4997 qlnxr_post_recv(struct ib_qp *ibqp,
4998         struct ib_recv_wr *wr,
4999         struct ib_recv_wr **bad_wr)
5000 {
5001         struct qlnxr_qp         *qp = get_qlnxr_qp(ibqp);
5002         struct qlnxr_dev        *dev = qp->dev;
5003         unsigned long           flags;
5004         int                     status = 0;
5005         qlnx_host_t             *ha;
5006         uint32_t                reg_addr;
5007
5008         ha = dev->ha;
5009
5010         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5011                 return -EINVAL;
5012
5013         QL_DPRINT12(ha, "enter\n");
5014
5015         if (qp->qp_type == IB_QPT_GSI) {
5016                 QL_DPRINT12(ha, "(qp->qp_type = IB_QPT_GSI)\n");
5017                 return qlnxr_gsi_post_recv(ibqp, wr, bad_wr);
5018         }
5019
5020         if (qp->srq) {
5021                 QL_DPRINT11(ha, "qp->srq [%p]"
5022                         " QP is associated with SRQ, cannot post RQ buffers\n",
5023                         qp->srq);
5024                 return -EINVAL;
5025         }
5026
5027         spin_lock_irqsave(&qp->q_lock, flags);
5028
5029         if (qp->state == ECORE_ROCE_QP_STATE_RESET) {
5030                 spin_unlock_irqrestore(&qp->q_lock, flags);
5031                 *bad_wr = wr;
5032
5033                 QL_DPRINT11(ha, "qp->state == ECORE_ROCE_QP_STATE_RESET\n");
5034
5035                 return -EINVAL;
5036         }
5037
5038         while (wr) {
5039                 int i;
5040
5041                 if ((ecore_chain_get_elem_left_u32(&qp->rq.pbl) <
5042                         QLNXR_MAX_RQE_ELEMENTS_PER_RQE) ||
5043                         (wr->num_sge > qp->rq.max_sges)) {
5044                         status = -ENOMEM;
5045                         *bad_wr = wr;
5046                         break;
5047                 }
5048                 for (i = 0; i < wr->num_sge; i++) {
5049                         u32 flags = 0;
5050                         struct rdma_rq_sge *rqe = ecore_chain_produce(&qp->rq.pbl);
5051
5052                         /* first one must include the number of SGE in the list */
5053                         if (!i)
5054                                 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, wr->num_sge);
5055
5056                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, wr->sg_list[i].lkey);
5057
5058                         RQ_SGE_SET(rqe, wr->sg_list[i].addr, \
5059                                 wr->sg_list[i].length, flags);
5060                 }
5061                 /* Special case of no SGEs. The FW requires between 1 and 4 SGEs;
5062                  * in this case we post a single SGE with length zero, because an
5063                  * RDMA write with immediate still consumes an RQ entry. */
5064                 if (!wr->num_sge) {
5065                         u32 flags = 0;
5066                         struct rdma_rq_sge *rqe = ecore_chain_produce(&qp->rq.pbl);
5067
5068                         /* first one must include the number of SGE in the list */
5069                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
5070                         SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
5071
5072                         //RQ_SGE_SET(rqe, 0, 0, flags);
5073                         rqe->addr.hi = 0;
5074                         rqe->addr.lo = 0;
5075
5076                         rqe->length = 0;
5077                         rqe->flags = cpu_to_le32(flags);
5078
5079                         i = 1;
5080                 }
5081
5082                 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
5083                 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
5084
5085                 qlnxr_inc_sw_prod(&qp->rq);
5086
5087                 wmb();
5088
5089                 qp->rq.db_data.data.value++;
5090
5091         //      writel(qp->rq.db_data.raw, qp->rq.db);
5092                 mmiowb();
5093         //      if (QLNX_IS_IWARP(dev)) {
5094         //              writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
5095         //              mmiowb(); /* for second doorbell */
5096         //      }
5097
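                /* Ring the RQ doorbell for this WR via bus_space; iWARP
                 * additionally requires a second doorbell write below. */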
5098                 reg_addr = (uint32_t)((uint8_t *)qp->rq.db -
5099                                 (uint8_t *)ha->cdev.doorbells);
5100
5101                 bus_write_4(ha->pci_dbells, reg_addr, qp->rq.db_data.raw);
5102                 bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5103
5104                 if (QLNX_IS_IWARP(dev)) {
5105                         reg_addr = (uint32_t)((uint8_t *)qp->rq.iwarp_db2 -
5106                                                 (uint8_t *)ha->cdev.doorbells);
5107                         bus_write_4(ha->pci_dbells, reg_addr, \
5108                                 qp->rq.iwarp_db2_data.raw);
5109                         bus_barrier(ha->pci_dbells,  0, 0, \
5110                                 BUS_SPACE_BARRIER_READ);
5111                 }
5112
5113                 wr = wr->next;
5114         }
5115
5116         spin_unlock_irqrestore(&qp->q_lock, flags);
5117
5118         QL_DPRINT12(ha, "exit status = 0x%x\n", status);
5119
5120         return status;
5121 }
5122
5123 /* For FMR we need to increment the FMR-completed counter used by the FMR
5124  * algorithm to determine whether a PBL can be freed or not.
5125  * This must be done whether or not the work request was signaled. For
5126  * this purpose we call this function from the condition that checks if a WR
5127  * should be skipped, to make sure we don't miss it (possibly this FMR
5128  * operation was not signaled).
5129  */
5130 static inline void
5131 qlnxr_chk_if_fmr(struct qlnxr_qp *qp)
5132 {
5133 #if __FreeBSD_version >= 1102000
5134
5135         if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
5136                 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
5137 #else
5138         if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_FAST_REG_MR)
5139                 qp->wqe_wr_id[qp->sq.cons].frmr->info.completed++;
5140
5141 #endif /* #if __FreeBSD_version >= 1102000 */
5142 }
5143
5144 static int
5145 process_req(struct qlnxr_dev *dev,
5146         struct qlnxr_qp *qp,
5147         struct qlnxr_cq *cq,
5148         int num_entries,
5149         struct ib_wc *wc,
5150         u16 hw_cons,
5151         enum ib_wc_status status,
5152         int force)
5153 {
5154         u16             cnt = 0;
5155         qlnx_host_t     *ha = dev->ha;
5156
5157         QL_DPRINT12(ha, "enter\n");
5158
5159         while (num_entries && qp->sq.wqe_cons != hw_cons) {
5160                 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
5161                         qlnxr_chk_if_fmr(qp);
5162                         /* skip WC */
5163                         goto next_cqe;
5164                 }
5165
5166                 /* fill WC */
5167                 wc->status = status;
5168                 wc->vendor_err = 0;
5169                 wc->wc_flags = 0;
5170                 wc->src_qp = qp->id;
5171                 wc->qp = &qp->ibqp;
5172
5173                 // common section
5174                 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
5175                 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
5176
5177                 switch (wc->opcode) {
5178                 case IB_WC_RDMA_WRITE:
5179
5180                         wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
5181
5182                         QL_DPRINT12(ha,
5183                                 "opcode = IB_WC_RDMA_WRITE bytes = %d\n",
5184                                 qp->wqe_wr_id[qp->sq.cons].bytes_len);
5185                         break;
5186
5187                 case IB_WC_COMP_SWAP:
5188                 case IB_WC_FETCH_ADD:
5189                         wc->byte_len = 8;
5190                         break;
5191
5192 #if __FreeBSD_version >= 1102000
5193                 case IB_WC_REG_MR:
5194                         qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
5195                         break;
5196 #else
5197                 case IB_WC_FAST_REG_MR:
5198                         qp->wqe_wr_id[qp->sq.cons].frmr->info.completed++;
5199                         break;
5200 #endif /* #if __FreeBSD_version >= 1102000 */
5201
5202                 case IB_WC_RDMA_READ:
5203                 case IB_WC_SEND:
5204
5205                         QL_DPRINT12(ha, "opcode = 0x%x \n", wc->opcode);
5206                         break;
5207                 default:
5208                         ;//DP_ERR("TBD ERROR");
5209                 }
5210
5211                 num_entries--;
5212                 wc++;
5213                 cnt++;
5214 next_cqe:
5215                 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
5216                         ecore_chain_consume(&qp->sq.pbl);
5217                 qlnxr_inc_sw_cons(&qp->sq);
5218         }
5219
5220         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5221         return cnt;
5222 }
5223
5224 static int
5225 qlnxr_poll_cq_req(struct qlnxr_dev *dev,
5226         struct qlnxr_qp *qp,
5227         struct qlnxr_cq *cq,
5228         int num_entries,
5229         struct ib_wc *wc,
5230         struct rdma_cqe_requester *req)
5231 {
5232         int             cnt = 0;
5233         qlnx_host_t     *ha = dev->ha;
5234
5235         QL_DPRINT12(ha, "enter req->status = 0x%x\n", req->status);
5236
5237         switch (req->status) {
5238         case RDMA_CQE_REQ_STS_OK:
5239
5240                 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
5241                         IB_WC_SUCCESS, 0);
5242                 break;
5243
5244         case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
5245
5246                 if (qp->state != ECORE_ROCE_QP_STATE_ERR)
5247                         cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
5248                                           IB_WC_WR_FLUSH_ERR, 1);
5249                 break;
5250
5251         default: /* other errors case */
5252
5253                 /* process all WQEs before the consumer */
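                /* Those are completed with IB_WC_SUCCESS; the failing WQE itself
                 * is reported below with the mapped error status. */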
5254                 qp->state = ECORE_ROCE_QP_STATE_ERR;
5255                 cnt = process_req(dev, qp, cq, num_entries, wc,
5256                                 req->sq_cons - 1, IB_WC_SUCCESS, 0);
5257                 wc += cnt;
5258                 /* if we have an extra WC, fill it with the actual error info */
5259
5260                 if (cnt < num_entries) {
5261                         enum ib_wc_status wc_status;
5262
5263                         switch (req->status) {
5264                         case    RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
5265                                 wc_status = IB_WC_BAD_RESP_ERR;
5266                                 break;
5267                         case    RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
5268                                 wc_status = IB_WC_LOC_LEN_ERR;
5269                                 break;
5270                         case    RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
5271                                 wc_status = IB_WC_LOC_QP_OP_ERR;
5272                                 break;
5273                         case    RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
5274                                 wc_status = IB_WC_LOC_PROT_ERR;
5275                                 break;
5276                         case    RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
5277                                 wc_status = IB_WC_MW_BIND_ERR;
5278                                 break;
5279                         case    RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
5280                                 wc_status = IB_WC_REM_INV_REQ_ERR;
5281                                 break;
5282                         case    RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
5283                                 wc_status = IB_WC_REM_ACCESS_ERR;
5284                                 break;
5285                         case    RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
5286                                 wc_status = IB_WC_REM_OP_ERR;
5287                                 break;
5288                         case    RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
5289                                 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
5290                                 break;
5291                         case    RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
5292                                 wc_status = IB_WC_RETRY_EXC_ERR;
5293                                 break;
5294                         default:
5295                                 wc_status = IB_WC_GENERAL_ERR;
5296                         }
5297
5298                         cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
5299                                         wc_status, 1 /* force use of WC */);
5300                 }
5301         }
5302
5303         QL_DPRINT12(ha, "exit cnt = %d\n", cnt);
5304         return cnt;
5305 }
5306
5307 static void
5308 __process_resp_one(struct qlnxr_dev *dev,
5309         struct qlnxr_qp *qp,
5310         struct qlnxr_cq *cq,
5311         struct ib_wc *wc,
5312         struct rdma_cqe_responder *resp,
5313         u64 wr_id)
5314 {
5315         enum ib_wc_status       wc_status = IB_WC_SUCCESS;
5316 #if __FreeBSD_version < 1102000
5317         u8                      flags;
5318 #endif
5319         qlnx_host_t             *ha = dev->ha;
5320
5321         QL_DPRINT12(ha, "enter qp = %p resp->status = 0x%x\n",
5322                 qp, resp->status);
5323
5324         wc->opcode = IB_WC_RECV;
5325         wc->wc_flags = 0;
5326
5327         switch (resp->status) {
5328         case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
5329                 wc_status = IB_WC_LOC_ACCESS_ERR;
5330                 break;
5331
5332         case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
5333                 wc_status = IB_WC_LOC_LEN_ERR;
5334                 break;
5335
5336         case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
5337                 wc_status = IB_WC_LOC_QP_OP_ERR;
5338                 break;
5339
5340         case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
5341                 wc_status = IB_WC_LOC_PROT_ERR;
5342                 break;
5343
5344         case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
5345                 wc_status = IB_WC_MW_BIND_ERR;
5346                 break;
5347
5348         case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
5349                 wc_status = IB_WC_REM_INV_RD_REQ_ERR;
5350                 break;
5351
5352         case RDMA_CQE_RESP_STS_OK:
5353
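                /* On success, decode the responder flags: immediate data,
                 * invalidate rkey, or RDMA write with immediate. */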
5354 #if __FreeBSD_version >= 1102000
5355                 if (resp->flags & QLNXR_RESP_IMM) {
5356                         wc->ex.imm_data =
5357                                 le32_to_cpu(resp->imm_data_or_inv_r_Key);
5358                         wc->wc_flags |= IB_WC_WITH_IMM;
5359
5360                         if (resp->flags & QLNXR_RESP_RDMA)
5361                                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
5362
5363                         if (resp->flags & QLNXR_RESP_INV) {
5364                                 QL_DPRINT11(ha,
5365                                         "Invalid flags QLNXR_RESP_INV [0x%x]"
5366                                         "qp = %p qp->id = 0x%x cq = %p"
5367                                         " cq->icid = 0x%x\n",
5368                                         resp->flags, qp, qp->id, cq, cq->icid );
5369                         }
5370                 } else if (resp->flags & QLNXR_RESP_INV) {
5371                         wc->ex.imm_data =
5372                                 le32_to_cpu(resp->imm_data_or_inv_r_Key);
5373                         wc->wc_flags |= IB_WC_WITH_INVALIDATE;
5374
5375                         if (resp->flags & QLNXR_RESP_RDMA) {
5376                                 QL_DPRINT11(ha,
5377                                         "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5378                                         "qp = %p qp->id = 0x%x cq = %p"
5379                                         " cq->icid = 0x%x\n",
5380                                         resp->flags, qp, qp->id, cq, cq->icid );
5381                         }
5382                 } else if (resp->flags & QLNXR_RESP_RDMA) {
5383                         QL_DPRINT11(ha, "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5384                                 "qp = %p qp->id = 0x%x cq = %p cq->icid = 0x%x\n",
5385                                 resp->flags, qp, qp->id, cq, cq->icid );
5386                 }
5387 #else
5388                 wc_status = IB_WC_SUCCESS;
5389                 wc->byte_len = le32_to_cpu(resp->length);
5390
5391                 flags = resp->flags & QLNXR_RESP_RDMA_IMM;
5392
5393                 switch (flags) {
5394                 case QLNXR_RESP_RDMA_IMM:
5395                         /* update opcode */
5396                         wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
5397                         /* fall through to set imm data */
5398                 case QLNXR_RESP_IMM:
5399                         wc->ex.imm_data =
5400                                 le32_to_cpu(resp->imm_data_or_inv_r_Key);
5401                         wc->wc_flags |= IB_WC_WITH_IMM;
5402                         break;
5403                 case QLNXR_RESP_RDMA:
5404                         QL_DPRINT11(ha, "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5405                                 "qp = %p qp->id = 0x%x cq = %p cq->icid = 0x%x\n",
5406                                 resp->flags, qp, qp->id, cq, cq->icid );
5407                         break;
5408                 default:
5409                         /* valid configuration, but nothing to do here */
5410                         ;
5411                 }
5412 #endif /* #if __FreeBSD_version >= 1102000 */
5413
5414                 break;
5415         default:
5416                 wc_status = IB_WC_GENERAL_ERR;
5417         }
5418
5419         /* fill WC */
5420         wc->status = wc_status;
5421         wc->vendor_err = 0;
5422         wc->src_qp = qp->id;
5423         wc->qp = &qp->ibqp;
5424         wc->wr_id = wr_id;
5425
5426         QL_DPRINT12(ha, "exit status = 0x%x\n", wc_status);
5427
5428         return;
5429 }
5430
5431 static int
5432 process_resp_one_srq(struct qlnxr_dev *dev,
5433         struct qlnxr_qp *qp,
5434         struct qlnxr_cq *cq,
5435         struct ib_wc *wc,
5436         struct rdma_cqe_responder *resp)
5437 {
5438         struct qlnxr_srq        *srq = qp->srq;
5439         u64                     wr_id;
5440         qlnx_host_t             *ha = dev->ha;
5441
5442         QL_DPRINT12(ha, "enter\n");
5443
5444         wr_id = HILO_U64(resp->srq_wr_id.hi, resp->srq_wr_id.lo);
5445
5446         if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
5447                 wc->status = IB_WC_WR_FLUSH_ERR;
5448                 wc->vendor_err = 0;
5449                 wc->wr_id = wr_id;
5450                 wc->byte_len = 0;
5451                 wc->src_qp = qp->id;
5452                 wc->qp = &qp->ibqp;
5453                 wc->wr_id = wr_id;
5454         } else {
5455                 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
5456         }
5457
5458         /* The PBL is maintained with WR granularity,
5459          * so increment the WR consumer after consuming a WR.
5460          */
5461         srq->hw_srq.wr_cons_cnt++;
5462
5463         QL_DPRINT12(ha, "exit\n");
5464         return 1;
5465 }
5466
5467 static int
5468 process_resp_one(struct qlnxr_dev *dev,
5469         struct qlnxr_qp *qp,
5470         struct qlnxr_cq *cq,
5471         struct ib_wc *wc,
5472         struct rdma_cqe_responder *resp)
5473 {
5474         qlnx_host_t     *ha = dev->ha;
5475         u64             wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
5476
5477         QL_DPRINT12(ha, "enter\n");
5478
5479         __process_resp_one(dev, qp, cq, wc, resp, wr_id);
5480
5481         while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
5482                 ecore_chain_consume(&qp->rq.pbl);
5483         qlnxr_inc_sw_cons(&qp->rq);
5484
5485         QL_DPRINT12(ha, "exit\n");
5486         return 1;
5487 }
5488
5489 static int
5490 process_resp_flush(struct qlnxr_qp *qp,
5491         int num_entries,
5492         struct ib_wc *wc,
5493         u16 hw_cons)
5494 {
5495         u16             cnt = 0;
5496         qlnx_host_t     *ha = qp->dev->ha;
5497
5498         QL_DPRINT12(ha, "enter\n");
5499
5500         while (num_entries && qp->rq.wqe_cons != hw_cons) {
5501                 /* fill WC */
5502                 wc->status = IB_WC_WR_FLUSH_ERR;
5503                 wc->vendor_err = 0;
5504                 wc->wc_flags = 0;
5505                 wc->src_qp = qp->id;
5506                 wc->byte_len = 0;
5507                 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
5508                 wc->qp = &qp->ibqp;
5509                 num_entries--;
5510                 wc++;
5511                 cnt++;
5512                 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
5513                         ecore_chain_consume(&qp->rq.pbl);
5514                 qlnxr_inc_sw_cons(&qp->rq);
5515         }
5516
5517         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5518         return cnt;
5519 }
5520
5521 static void
5522 try_consume_resp_cqe(struct qlnxr_cq *cq,
5523         struct qlnxr_qp *qp,
5524         struct rdma_cqe_responder *resp,
5525         int *update)
5526 {
5527         if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
5528                 consume_cqe(cq);
5529                 *update |= 1;
5530         }
5531 }
5532
5533 static int
5534 qlnxr_poll_cq_resp_srq(struct qlnxr_dev *dev,
5535         struct qlnxr_qp *qp,
5536         struct qlnxr_cq *cq,
5537         int num_entries,
5538         struct ib_wc *wc,
5539         struct rdma_cqe_responder *resp,
5540         int *update)
5541 {
5542         int             cnt;
5543         qlnx_host_t     *ha = dev->ha;
5544
5545         QL_DPRINT12(ha, "enter\n");
5546
5547         cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
5548         consume_cqe(cq);
5549         *update |= 1;
5550
5551         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5552         return cnt;
5553 }
5554
5555 static int
5556 qlnxr_poll_cq_resp(struct qlnxr_dev *dev,
5557         struct qlnxr_qp *qp,
5558         struct qlnxr_cq *cq,
5559         int num_entries,
5560         struct ib_wc *wc,
5561         struct rdma_cqe_responder *resp,
5562         int *update)
5563 {
5564         int             cnt;
5565         qlnx_host_t     *ha = dev->ha;
5566
5567         QL_DPRINT12(ha, "enter\n");
5568
5569         if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
5570                 cnt = process_resp_flush(qp, num_entries, wc,
5571                                 resp->rq_cons);
5572                 try_consume_resp_cqe(cq, qp, resp, update);
5573         } else {
5574                 cnt = process_resp_one(dev, qp, cq, wc, resp);
5575                 consume_cqe(cq);
5576                 *update |= 1;
5577         }
5578
5579         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5580         return cnt;
5581 }
5582
5583 static void
5584 try_consume_req_cqe(struct qlnxr_cq *cq, struct qlnxr_qp *qp,
5585         struct rdma_cqe_requester *req, int *update)
5586 {
5587         if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
5588                 consume_cqe(cq);
5589                 *update |= 1;
5590         }
5591 }
5592
5593 static void
5594 doorbell_cq(struct qlnxr_dev *dev, struct qlnxr_cq *cq, u32 cons, u8 flags)
5595 {
5596         uint64_t        reg_addr;
5597         qlnx_host_t     *ha = dev->ha;
5598
5599         QL_DPRINT12(ha, "enter\n");
5600
5601         wmb();
5602         cq->db.data.agg_flags = flags;
5603         cq->db.data.value = cpu_to_le32(cons);
5604
5605         reg_addr = (uint64_t)((uint8_t *)cq->db_addr -
5606                                 (uint8_t *)(ha->cdev.doorbells));
5607
5608         bus_write_8(ha->pci_dbells, reg_addr, cq->db.raw);
5609         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5610
5611         QL_DPRINT12(ha, "exit\n");
5612         return;
5613
5614 //#ifdef __LP64__
5615 //      writeq(cq->db.raw, cq->db_addr);
5616 //#else
5617         /* Note that since the FW allows only 64-bit writes, on 32-bit systems
5618          * the value of db_addr must be low enough. This is currently not
5619          * enforced.
5620          */
5621 //      writel(cq->db.raw & 0xffffffff, cq->db_addr);
5622 //      mmiowb();
5623 //#endif
5624 }
5625
5626 static int
5627 is_valid_cqe(struct qlnxr_cq *cq, union rdma_cqe *cqe)
5628 {
5629         struct rdma_cqe_requester *resp_cqe = &cqe->req;
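        /* A CQE is valid when its toggle bit matches the CQ's current PBL toggle. */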
5630         return (resp_cqe->flags & RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK) ==
5631                         cq->pbl_toggle;
5632 }
5633
5634 int
5635 qlnxr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
5636 {
5637         struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
5638         struct qlnxr_dev *dev = get_qlnxr_dev((ibcq->device));
5639         int             done = 0;
5640         union rdma_cqe  *cqe = cq->latest_cqe;
5641         int             update = 0;
5642         u32             old_cons, new_cons;
5643         unsigned long   flags;
5644         qlnx_host_t     *ha = dev->ha;
5645
5646         QL_DPRINT12(ha, "enter\n");
5647
5648         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5649                 return -EINVAL;
5650
5651         if (cq->destroyed) {
5652                 QL_DPRINT11(ha, "called after destroy for cq %p (icid=%d)\n",
5653                         cq, cq->icid);
5654                 return 0;
5655         }
5656
5657         if (cq->cq_type == QLNXR_CQ_TYPE_GSI)
5658                 return qlnxr_gsi_poll_cq(ibcq, num_entries, wc);
5659
5660         spin_lock_irqsave(&cq->cq_lock, flags);
5661
5662         old_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
5663
5664         while (num_entries && is_valid_cqe(cq, cqe)) {
5665                 int cnt = 0;
5666                 struct qlnxr_qp *qp;
5667                 struct rdma_cqe_requester *resp_cqe;
5668                 enum rdma_cqe_type cqe_type;
5669
5670                 /* prevent speculative reads of any field of CQE */
5671                 rmb();
5672
5673                 resp_cqe = &cqe->req;
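                /* The CQE's qp_handle carries the owning qlnxr_qp pointer split
                 * into hi/lo 32-bit words; reassemble it to recover the QP. */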
5674                 qp = (struct qlnxr_qp *)(uintptr_t)HILO_U64(resp_cqe->qp_handle.hi,
5675                                                 resp_cqe->qp_handle.lo);
5676
5677                 if (!qp) {
5678                         QL_DPRINT11(ha, "qp = NULL\n");
5679                         break;
5680                 }
5681
5682                 wc->qp = &qp->ibqp;
5683
5684                 cqe_type = GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
5685
5686                 switch (cqe_type) {
5687                 case RDMA_CQE_TYPE_REQUESTER:
5688                         cnt = qlnxr_poll_cq_req(dev, qp, cq, num_entries,
5689                                         wc, &cqe->req);
5690                         try_consume_req_cqe(cq, qp, &cqe->req, &update);
5691                         break;
5692                 case RDMA_CQE_TYPE_RESPONDER_RQ:
5693                         cnt = qlnxr_poll_cq_resp(dev, qp, cq, num_entries,
5694                                         wc, &cqe->resp, &update);
5695                         break;
5696                 case RDMA_CQE_TYPE_RESPONDER_SRQ:
5697                         cnt = qlnxr_poll_cq_resp_srq(dev, qp, cq, num_entries,
5698                                         wc, &cqe->resp, &update);
5699                         break;
5700                 case RDMA_CQE_TYPE_INVALID:
5701                 default:
5702                         QL_DPRINT11(ha, "cqe type [0x%x] invalid\n", cqe_type);
5703                         break;
5704                 }
5705                 num_entries -= cnt;
5706                 wc += cnt;
5707                 done += cnt;
5708
5709                 cqe = cq->latest_cqe;
5710         }
5711         new_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
5712
5713         cq->cq_cons += new_cons - old_cons;
5714
5715         if (update) {
5716                 /* The doorbell notifies about the latest VALID entry,
5717                  * but the chain already points to the next INVALID one.
5718                  */
5719                 doorbell_cq(dev, cq, cq->cq_cons - 1, cq->arm_flags);
5720                 QL_DPRINT12(ha, "cq = %p cons = 0x%x "
5721                         "arm_flags = 0x%x db.icid = 0x%x\n", cq,
5722                         (cq->cq_cons - 1), cq->arm_flags, cq->db.data.icid);
5723         }
5724
5725         spin_unlock_irqrestore(&cq->cq_lock, flags);
5726
5727         QL_DPRINT12(ha, "exit\n");
5728
5729         return done;
5730 }
5731
5732 int
5733 qlnxr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
5734 {
5735         struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
5736         unsigned long sflags;
5737         struct qlnxr_dev *dev;
5738         qlnx_host_t     *ha;
5739
5740         dev = get_qlnxr_dev((ibcq->device));
5741         ha = dev->ha;
5742
5743         QL_DPRINT12(ha, "enter ibcq = %p flags = 0x%x "
5744                 "cq = %p cons = 0x%x cq_type = 0x%x\n", ibcq,
5745                 flags, cq, cq->cq_cons, cq->cq_type);
5746
5747         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5748                 return -EINVAL;
5749
5750         if (cq->destroyed) {
5751                 QL_DPRINT11(ha, "cq was already destroyed cq = %p icid=%d\n",
5752                         cq, cq->icid);
5753                 return -EINVAL;
5754         }
5755
5756         if (cq->cq_type == QLNXR_CQ_TYPE_GSI) {
5757                 return 0;
5758         }
5759
5760         spin_lock_irqsave(&cq->cq_lock, sflags);
5761
5762         cq->arm_flags = 0;
5763
5764         if (flags & IB_CQ_SOLICITED) {
5765                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
5766         }
5767         if (flags & IB_CQ_NEXT_COMP) {
5768                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
5769         }
5770
5771         doorbell_cq(dev, cq, (cq->cq_cons - 1), cq->arm_flags);
5772
5773         spin_unlock_irqrestore(&cq->cq_lock, sflags);
5774
5775         QL_DPRINT12(ha, "exit ibcq = %p flags = 0x%x\n", ibcq, flags);
5776         return 0;
5777 }
5778
5779 static struct qlnxr_mr *
5780 __qlnxr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
5781 {
5782         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
5783         struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
5784         struct qlnxr_mr *mr;
5785         int             rc = -ENOMEM;
5786         qlnx_host_t     *ha;
5787
5788         ha = dev->ha;
5789
5790         QL_DPRINT12(ha, "enter ibpd = %p pd = %p "
5791                 " pd_id = %d max_page_list_len = %d\n",
5792                 ibpd, pd, pd->pd_id, max_page_list_len);
5793
5794         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
5795         if (!mr) {
5796                 QL_DPRINT11(ha, "kzalloc(mr) failed\n");
5797                 return ERR_PTR(rc);
5798         }
5799
5800         mr->dev = dev;
5801         mr->type = QLNXR_MR_FRMR;
5802
5803         rc = qlnxr_init_mr_info(dev, &mr->info, max_page_list_len,
5804                                   1 /* allow dual layer pbl */);
5805         if (rc) {
5806                 QL_DPRINT11(ha, "qlnxr_init_mr_info failed\n");
5807                 goto err0;
5808         }
5809
5810         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
5811         if (rc) {
5812                 QL_DPRINT11(ha, "ecore_rdma_alloc_tid failed\n");
5813                 goto err0;
5814         }
5815
5816         /* index only, 18 bit long, lkey = itid << 8 | key */
5817         mr->hw_mr.tid_type = ECORE_RDMA_TID_FMR;
5818         mr->hw_mr.key = 0;
5819         mr->hw_mr.pd = pd->pd_id;
5820         mr->hw_mr.local_read = 1;
5821         mr->hw_mr.local_write = 0;
5822         mr->hw_mr.remote_read = 0;
5823         mr->hw_mr.remote_write = 0;
5824         mr->hw_mr.remote_atomic = 0;
5825         mr->hw_mr.mw_bind = false; /* TBD MW BIND */
5826         mr->hw_mr.pbl_ptr = 0; /* Will be supplied during post */
5827         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
5828         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
5829         mr->hw_mr.fbo = 0;
5830         mr->hw_mr.length = 0;
5831         mr->hw_mr.vaddr = 0;
5832         mr->hw_mr.zbva = false; /* TBD figure when this should be true */
5833         mr->hw_mr.phy_mr = true; /* Fast MR - True, Regular Register False */
5834         mr->hw_mr.dma_mr = false;
5835
5836         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
5837         if (rc) {
5838                 QL_DPRINT11(ha, "ecore_rdma_register_tid failed\n");
5839                 goto err1;
5840         }
5841
5842         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
5843         mr->ibmr.rkey = mr->ibmr.lkey;
5844
5845         QL_DPRINT12(ha, "exit mr = %p mr->ibmr.lkey = 0x%x\n",
5846                 mr, mr->ibmr.lkey);
5847
5848         return mr;
5849
5850 err1:
5851         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
5852 err0:
5853         kfree(mr);
5854
5855         QL_DPRINT12(ha, "exit\n");
5856
5857         return ERR_PTR(rc);
5858 }
5859
5860 #if __FreeBSD_version >= 1102000
5861
5862 struct ib_mr *
5863 qlnxr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, u32 max_num_sg)
5864 {
5865         struct qlnxr_dev *dev;
5866         struct qlnxr_mr *mr;
5867         qlnx_host_t     *ha;
5868
5869         dev = get_qlnxr_dev(ibpd->device);
5870         ha = dev->ha;
5871
5872         QL_DPRINT12(ha, "enter\n");
5873
5874         if (mr_type != IB_MR_TYPE_MEM_REG)
5875                 return ERR_PTR(-EINVAL);
5876
5877         mr = __qlnxr_alloc_mr(ibpd, max_num_sg);
5878
5879         if (IS_ERR(mr))
5880                 return ERR_PTR(-EINVAL);
5881
5882         QL_DPRINT12(ha, "exit mr = %p &mr->ibmr = %p\n", mr, &mr->ibmr);
5883
5884         return &mr->ibmr;
5885 }
5886
5887 static int
5888 qlnxr_set_page(struct ib_mr *ibmr, u64 addr)
5889 {
5890         struct qlnxr_mr *mr = get_qlnxr_mr(ibmr);
5891         struct qlnxr_pbl *pbl_table;
5892         struct regpair *pbe;
5893         struct qlnxr_dev *dev;
5894         qlnx_host_t     *ha;
5895         u32 pbes_in_page;
5896
5897         dev = mr->dev;
5898         ha = dev->ha;
5899
5900         if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
5901                 QL_DPRINT12(ha, "fails mr->npages %d\n", mr->npages);
5902                 return -ENOMEM;
5903         }
5904
5905         QL_DPRINT12(ha, "mr->npages %d addr = %p enter\n", mr->npages,
5906                 ((void *)addr));
5907
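        /* Each PBL page holds pbl_size / sizeof(u64) page-buffer entries (PBEs):
         * npages / pbes_in_page selects the PBL table page, and
         * npages % pbes_in_page selects the PBE within that page. */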
5908         pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
5909         pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
5910         pbe = (struct regpair *)pbl_table->va;
5911         pbe +=  mr->npages % pbes_in_page;
5912         pbe->lo = cpu_to_le32((u32)addr);
5913         pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
5914
5915         mr->npages++;
5916
5917         QL_DPRINT12(ha, "mr->npages %d addr = %p exit \n", mr->npages,
5918                 ((void *)addr));
5919         return 0;
5920 }
5921
5922 int
5923 qlnxr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
5924         int sg_nents, unsigned int *sg_offset)
5925 {
5926         int             ret;
5927         struct qlnxr_mr *mr = get_qlnxr_mr(ibmr);
5928         qlnx_host_t     *ha;
5929
5930         if (mr == NULL)
5931                 return (-1);
5932
5933         if (mr->dev == NULL)
5934                 return (-1);
5935
5936         ha = mr->dev->ha;
5937
5938         QL_DPRINT12(ha, "enter\n");
5939
5940         mr->npages = 0;
5941         qlnx_handle_completed_mrs(mr->dev, &mr->info);
5942
5943         ret = ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qlnxr_set_page);
5944
5945         QL_DPRINT12(ha, "exit ret = %d\n", ret);
5946
5947         return (ret);
5948 }
5949
5950 #else
5951
5952 struct ib_mr *
5953 qlnxr_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
5954 {
5955         struct qlnxr_dev *dev;
5956         struct qlnxr_mr *mr;
5957         qlnx_host_t     *ha;
5958         struct ib_mr *ibmr = NULL;
5959
5960         dev = get_qlnxr_dev((ibpd->device));
5961         ha = dev->ha;
5962
5963         QL_DPRINT12(ha, "enter\n");
5964
5965         mr = __qlnxr_alloc_mr(ibpd, max_page_list_len);
5966
5967         if (IS_ERR(mr)) {
5968                 ibmr = ERR_PTR(-EINVAL);
5969         } else {
5970                 ibmr = &mr->ibmr;
5971         }
5972
5973         QL_DPRINT12(ha, "exit %p\n", ibmr);
5974         return (ibmr);
5975 }
5976
5977 void
5978 qlnxr_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
5979 {
5980         struct qlnxr_fast_reg_page_list *frmr_list;
5981
5982         frmr_list = get_qlnxr_frmr_list(page_list);
5983
5984         free_mr_info(frmr_list->dev, &frmr_list->info);
5985
5986         kfree(frmr_list->ibfrpl.page_list);
5987         kfree(frmr_list);
5988
5989         return;
5990 }
5991
5992 struct ib_fast_reg_page_list *
5993 qlnxr_alloc_frmr_page_list(struct ib_device *ibdev, int page_list_len)
5994 {
5995         struct qlnxr_fast_reg_page_list *frmr_list = NULL;
5996         struct qlnxr_dev                *dev;
5997         int                             size = page_list_len * sizeof(u64);
5998         int                             rc = -ENOMEM;
5999         qlnx_host_t                     *ha;
6000
6001         dev = get_qlnxr_dev(ibdev);
6002         ha = dev->ha;
6003
6004         QL_DPRINT12(ha, "enter\n");
6005
6006         frmr_list = kzalloc(sizeof(*frmr_list), GFP_KERNEL);
6007         if (!frmr_list) {
6008                 QL_DPRINT11(ha, "kzalloc(frmr_list) failed\n");
6009                 goto err;
6010         }
6011
6012         frmr_list->dev = dev;
6013         frmr_list->ibfrpl.page_list = kzalloc(size, GFP_KERNEL);
6014         if (!frmr_list->ibfrpl.page_list) {
6015                 QL_DPRINT11(ha, "kzalloc(frmr_list->ibfrpl.page_list) failed\n");
6016                 goto err0;
6017         }
6018
6019         rc = qlnxr_init_mr_info(dev, &frmr_list->info, page_list_len,
6020                           1 /* allow dual layer pbl */);
6021         if (rc)
6022                 goto err1;
6023
6024         QL_DPRINT12(ha, "exit %p\n", &frmr_list->ibfrpl);
6025
6026         return &frmr_list->ibfrpl;
6027
6028 err1:
6029         kfree(frmr_list->ibfrpl.page_list);
6030 err0:
6031         kfree(frmr_list);
6032 err:
6033         QL_DPRINT12(ha, "exit with error\n");
6034
6035         return ERR_PTR(rc);
6036 }
6037
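/*
 * Descriptive note (added): sanity-check a physical buffer list.  Every
 * entry must have the same non-zero size; on success *total_size is set
 * to the sum of all buffer sizes.
 */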
6038 static int
6039 qlnxr_validate_phys_buf_list(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
6040         int buf_cnt, uint64_t *total_size)
6041 {
6042         u64 size = 0;
6043
6044         *total_size = 0;
6045
6046         if (!buf_cnt || buf_list == NULL) {
6047                 QL_DPRINT11(ha,
6048                         "failed buf_list = %p buf_cnt = %d\n", buf_list, buf_cnt);
6049                 return (-1);
6050         }
6051
6052         size = buf_list->size;
6053
6054         if (!size) {
6055                 QL_DPRINT11(ha,
6056                         "failed buf_list = %p buf_cnt = %d"
6057                         " buf_list->size = 0\n", buf_list, buf_cnt);
6058                 return (-1);
6059         }
6060
6061         while (buf_cnt) {
6062                 *total_size += buf_list->size;
6063
6064                 if (buf_list->size != size) {
6065                         QL_DPRINT11(ha,
6066                                 "failed buf_list = %p buf_cnt = %d"
6067                                 " all buffers should have same size\n",
6068                                 buf_list, buf_cnt);
6069                         return (-1);
6070                 }
6071
6072                 buf_list++;
6073                 buf_cnt--;
6074         }
6075         return (0);
6076 }
6077
6078 static size_t
6079 qlnxr_get_num_pages(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
6080         int buf_cnt)
6081 {
6082         int     i;
6083         size_t  num_pages = 0;
6084         u64     size;
6085
6086         for (i = 0; i < buf_cnt; i++) {
6087                 size = 0;
6088                 while (size < buf_list->size) {
6089                         size += PAGE_SIZE;
6090                         num_pages++;
6091                 }
6092                 buf_list++;
6093         }
6094         return (num_pages);
6095 }
6096
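/*
 * Descriptive note (added): walk the physical buffer list and write one
 * little-endian PBE per PAGE_SIZE chunk into the PBL table, advancing to
 * the next PBL page whenever the current one is full.
 */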
6097 static void
6098 qlnxr_populate_phys_mem_pbls(struct qlnxr_dev *dev,
6099         struct ib_phys_buf *buf_list, int buf_cnt,
6100         struct qlnxr_pbl *pbl, struct qlnxr_pbl_info *pbl_info)
6101 {
6102         struct regpair          *pbe;
6103         struct qlnxr_pbl        *pbl_tbl;
6104         int                     pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
6105         qlnx_host_t             *ha;
6106         int                     i;
6107         u64                     pbe_addr;
6108
6109         ha = dev->ha;
6110
6111         QL_DPRINT12(ha, "enter\n");
6112
6113         if (!pbl_info) {
6114                 QL_DPRINT11(ha, "PBL_INFO not initialized\n");
6115                 return;
6116         }
6117
6118         if (!pbl_info->num_pbes) {
6119                 QL_DPRINT11(ha, "pbl_info->num_pbes == 0\n");
6120                 return;
6121         }
6122
6123         /* If we have a two-layered pbl, the first pbl points to the rest
6124          * of the pbls and the first entry lies in the second pbl of the table.
6125          */
6126         if (pbl_info->two_layered)
6127                 pbl_tbl = &pbl[1];
6128         else
6129                 pbl_tbl = pbl;
6130
6131         pbe = (struct regpair *)pbl_tbl->va;
6132         if (!pbe) {
6133                 QL_DPRINT12(ha, "pbe is NULL\n");
6134                 return;
6135         }
6136
6137         pbe_cnt = 0;
6138
6139         for (i = 0; i < buf_cnt; i++) {
6140                 pages = buf_list->size >> PAGE_SHIFT;
6141
6142                 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
6143                         /* store the page address in pbe */
6144
6145                         pbe_addr = buf_list->addr + (PAGE_SIZE * pg_cnt);
6146
6147                         pbe->lo = cpu_to_le32((u32)pbe_addr);
6148                         pbe->hi = cpu_to_le32(((u32)(pbe_addr >> 32)));
6149
6150                         QL_DPRINT12(ha, "Populate pbl table:"
6151                                 " pbe->addr=0x%x:0x%x "
6152                                 " pbe_cnt = %d total_num_pbes=%d"
6153                                 " pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
6154                                 total_num_pbes, pbe);
6155
6156                         pbe_cnt++;
6157                         total_num_pbes++;
6158                         pbe++;
6159
6160                         if (total_num_pbes == pbl_info->num_pbes)
6161                                 return;
6162
6163                         /* If the given pbl is full of pbes,
6164                          * move on to the next pbl. */
6165
6166                         if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
6167                                 pbl_tbl++;
6168                                 pbe = (struct regpair *)pbl_tbl->va;
6169                                 pbe_cnt = 0;
6170                         }
6171                 }
6172                 buf_list++;
6173         }
6174         QL_DPRINT12(ha, "exit\n");
6175         return;
6176 }
6177
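/*
 * Descriptive note (added): kernel MR registration on the older API path
 * (the #else of __FreeBSD_version >= 1102000).  Validates the buffer list,
 * builds the PBLs, registers a TID with the ecore RDMA layer and derives
 * lkey/rkey from the returned itid.
 */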
6178 struct ib_mr *
6179 qlnxr_reg_kernel_mr(struct ib_pd *ibpd,
6180         struct ib_phys_buf *buf_list,
6181         int buf_cnt, int acc, u64 *iova_start)
6182 {
6183         int             rc = -ENOMEM;
6184         struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
6185         struct qlnxr_mr *mr;
6186         struct qlnxr_pd *pd;
6187         qlnx_host_t     *ha;
6188         size_t          num_pages = 0;
6189         uint64_t        length;
6190
6191         ha = dev->ha;
6192
6193         QL_DPRINT12(ha, "enter\n");
6194
6195         pd = get_qlnxr_pd(ibpd);
6196
6197         QL_DPRINT12(ha, "pd = %d buf_list = %p, buf_cnt = %d,"
6198                 " iova_start = %p, acc = %d\n",
6199                 pd->pd_id, buf_list, buf_cnt, iova_start, acc);
6200
6201         //if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
6202         //      QL_DPRINT11(ha, "(acc & IB_ACCESS_REMOTE_WRITE &&"
6203         //              " !(acc & IB_ACCESS_LOCAL_WRITE))\n");
6204         //      return ERR_PTR(-EINVAL);
6205         //}
6206
6207         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
6208         if (!mr) {
6209                 QL_DPRINT11(ha, "kzalloc(mr) failed\n");
6210                 return ERR_PTR(rc);
6211         }
6212
6213         mr->type = QLNXR_MR_KERNEL;
6214         mr->iova_start = iova_start;
6215
6216         rc = qlnxr_validate_phys_buf_list(ha, buf_list, buf_cnt, &length);
6217         if (rc)
6218                 goto err0;
6219
6220         num_pages = qlnxr_get_num_pages(ha, buf_list, buf_cnt);
6221         if (!num_pages)
6222                 goto err0;
6223
6224         rc = qlnxr_init_mr_info(dev, &mr->info, num_pages, 1);
6225         if (rc) {
6226                 QL_DPRINT11(ha,
6227                         "qlnxr_init_mr_info failed [%d]\n", rc);
6228                 goto err1;
6229         }
6230
6231         qlnxr_populate_phys_mem_pbls(dev, buf_list, buf_cnt, mr->info.pbl_table,
6232                    &mr->info.pbl_info);
6233
6234         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
6235
6236         if (rc) {
6237                 QL_DPRINT11(ha, "roce alloc tid returned an error %d\n", rc);
6238                 goto err1;
6239         }
6240
6241         /* index only, 18 bit long, lkey = itid << 8 | key */
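        /* Added example: itid 0x3 with key 0 gives lkey 0x300; the itid
         * lands in bits [25:8] and the key presumably fills the low byte. */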
6242         mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
6243         mr->hw_mr.key = 0;
6244         mr->hw_mr.pd = pd->pd_id;
6245         mr->hw_mr.local_read = 1;
6246         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
6247         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
6248         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
6249         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
6250         mr->hw_mr.mw_bind = false; /* TBD MW BIND */
6251         mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
6252         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
6253         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
6254         mr->hw_mr.page_size_log = ilog2(PAGE_SIZE); /* for the MR pages */
6255
6256         mr->hw_mr.fbo = 0;
6257
6258         mr->hw_mr.length = length;
6259         mr->hw_mr.vaddr = (uint64_t)iova_start;
6260         mr->hw_mr.zbva = false; /* TBD figure when this should be true */
6261         mr->hw_mr.phy_mr = false; /* Fast MR - True, Regular Register False */
6262         mr->hw_mr.dma_mr = false;
6263
6264         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
6265         if (rc) {
6266                 QL_DPRINT11(ha, "roce register tid returned an error %d\n", rc);
6267                 goto err2;
6268         }
6269
6270         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
6271         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
6272                 mr->hw_mr.remote_atomic)
6273                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
6274
6275         QL_DPRINT12(ha, "lkey: %x\n", mr->ibmr.lkey);
6276
6277         return (&mr->ibmr);
6278
6279 err2:
6280         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
6281 err1:
6282         qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
6283 err0:
6284         kfree(mr);
6285
6286         QL_DPRINT12(ha, "exit [%d]\n", rc);
6287         return (ERR_PTR(rc));
6288 }
6289
6290 #endif /* #if __FreeBSD_version >= 1102000 */
6291
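/*
 * Descriptive note (added): address handle support is minimal.  create_ah
 * just caches the supplied ib_ah_attr in a qlnxr_ah, destroy_ah frees it,
 * and query/modify AH are not supported.
 */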
6292 struct ib_ah *
6293 #if __FreeBSD_version >= 1102000
6294 qlnxr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
6295         struct ib_udata *udata)
6296 #else
6297 qlnxr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
6298 #endif /* #if __FreeBSD_version >= 1102000 */
6299 {
6300         struct qlnxr_dev *dev;
6301         qlnx_host_t     *ha;
6302         struct qlnxr_ah *ah;
6303
6304         dev = get_qlnxr_dev((ibpd->device));
6305         ha = dev->ha;
6306
6307         QL_DPRINT12(ha, "in create_ah\n");
6308
6309         ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
6310         if (!ah) {
6311                 QL_DPRINT12(ha, "no address handle can be allocated\n");
6312                 return ERR_PTR(-ENOMEM);
6313         }
6314
6315         ah->attr = *attr;
6316
6317         return &ah->ibah;
6318 }
6319
6320 int
6321 qlnxr_destroy_ah(struct ib_ah *ibah)
6322 {
6323         struct qlnxr_dev *dev;
6324         qlnx_host_t     *ha;
6325         struct qlnxr_ah *ah = get_qlnxr_ah(ibah);
6326
6327         dev = get_qlnxr_dev((ibah->device));
6328         ha = dev->ha;
6329
6330         QL_DPRINT12(ha, "in destroy_ah\n");
6331
6332         kfree(ah);
6333         return 0;
6334 }
6335
6336 int
6337 qlnxr_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
6338 {
6339         struct qlnxr_dev *dev;
6340         qlnx_host_t     *ha;
6341
6342         dev = get_qlnxr_dev((ibah->device));
6343         ha = dev->ha;
6344         QL_DPRINT12(ha, "Query AH not supported\n");
6345         return -EINVAL;
6346 }
6347
6348 int
6349 qlnxr_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
6350 {
6351         struct qlnxr_dev *dev;
6352         qlnx_host_t     *ha;
6353
6354         dev = get_qlnxr_dev((ibah->device));
6355         ha = dev->ha;
6356         QL_DPRINT12(ha, "Modify AH not supported\n");
6357         return -ENOSYS;
6358 }
6359
6360 #if __FreeBSD_version >= 1102000
6361 int
6362 qlnxr_process_mad(struct ib_device *ibdev,
6363                 int process_mad_flags,
6364                 u8 port_num,
6365                 const struct ib_wc *in_wc,
6366                 const struct ib_grh *in_grh,
6367                 const struct ib_mad_hdr *mad_hdr,
6368                 size_t in_mad_size,
6369                 struct ib_mad_hdr *out_mad,
6370                 size_t *out_mad_size,
6371                 u16 *out_mad_pkey_index)
6372
6373 #else
6374
6375 int
6376 qlnxr_process_mad(struct ib_device *ibdev,
6377                         int process_mad_flags,
6378                         u8 port_num,
6379                         struct ib_wc *in_wc,
6380                         struct ib_grh *in_grh,
6381                         struct ib_mad *in_mad,
6382                         struct ib_mad *out_mad)
6383
6384 #endif /* #if __FreeBSD_version >= 1102000 */
6385 {
6386         struct qlnxr_dev *dev;
6387         qlnx_host_t     *ha;
6388
6389         dev = get_qlnxr_dev(ibdev);
6390         ha = dev->ha;
6391         QL_DPRINT12(ha, "process mad not supported\n");
6392
6393         return -ENOSYS;
6394 //      QL_DPRINT12(ha, "qlnxr_process_mad in_mad %x %x %x %x %x %x %x %x\n",
6395 //               in_mad->mad_hdr.attr_id, in_mad->mad_hdr.base_version,
6396 //               in_mad->mad_hdr.attr_mod, in_mad->mad_hdr.class_specific,
6397 //               in_mad->mad_hdr.class_version, in_mad->mad_hdr.method,
6398 //               in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.status);
6399
6400 //      return IB_MAD_RESULT_SUCCESS;   
6401 }
6402
6403 #if __FreeBSD_version >= 1102000
6404 int
6405 qlnxr_get_port_immutable(struct ib_device *ibdev, u8 port_num,
6406         struct ib_port_immutable *immutable)
6407 {
6408         struct qlnxr_dev        *dev;
6409         qlnx_host_t             *ha;
6410         struct ib_port_attr     attr;
6411         int                     err;
6412
6413         dev = get_qlnxr_dev(ibdev);
6414         ha = dev->ha;
6415
6416         QL_DPRINT12(ha, "enter\n");
6417
6418         err = qlnxr_query_port(ibdev, port_num, &attr);
6419         if (err)
6420                 return err;
6421
6422         if (QLNX_IS_IWARP(dev)) {
6423                 immutable->pkey_tbl_len = 1;
6424                 immutable->gid_tbl_len = 1;
6425                 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
6426                 immutable->max_mad_size = 0;
6427         } else {
6428                 immutable->pkey_tbl_len = attr.pkey_tbl_len;
6429                 immutable->gid_tbl_len = attr.gid_tbl_len;
6430                 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
6431                 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
6432         }
6433
6434         QL_DPRINT12(ha, "exit\n");
6435         return 0;
6436 }
6437 #endif /* #if __FreeBSD_version >= 1102000 */
6438
6439 /***** iWARP related functions *************/
6440
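/*
 * Descriptive note (added): the ecore layer delivers connection-manager
 * events through qlnxr_iw_event_handler(), which translates them into
 * iw_cm events (connect request, established, disconnect, close) and into
 * QP fatal/access-error events for the consumer.
 */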
6441 static void
6442 qlnxr_iw_mpa_request(void *context,
6443         struct ecore_iwarp_cm_event_params *params)
6444 {
6445         struct qlnxr_iw_listener *listener = (struct qlnxr_iw_listener *)context;
6446         struct qlnxr_dev *dev = listener->dev;
6447         struct qlnxr_iw_ep *ep;
6448         struct iw_cm_event event;
6449         struct sockaddr_in *laddr;
6450         struct sockaddr_in *raddr;
6451         qlnx_host_t     *ha;
6452
6453         ha = dev->ha;
6454
6455         QL_DPRINT12(ha, "enter\n");
6456
6457         if (params->cm_info->ip_version != ECORE_TCP_IPV4) {
6458                 QL_DPRINT11(ha, "only IPv4 supported [0x%x]\n",
6459                         params->cm_info->ip_version);
6460                 return;
6461         }
6462
6463         ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
6464
6465         if (!ep) {
6466                 QL_DPRINT11(ha, "kzalloc(ep) failed\n");
6467                 return;
6468         }
6469
6470         ep->dev = dev;
6471         ep->ecore_context = params->ep_context;
6472
6473         memset(&event, 0, sizeof(event));
6474
6475         event.event = IW_CM_EVENT_CONNECT_REQUEST;
6476         event.status = params->status;
6477
6478         laddr = (struct sockaddr_in *)&event.local_addr;
6479         raddr = (struct sockaddr_in *)&event.remote_addr;
6480
6481         laddr->sin_family = AF_INET;
6482         raddr->sin_family = AF_INET;
6483
6484         laddr->sin_port = htons(params->cm_info->local_port);
6485         raddr->sin_port = htons(params->cm_info->remote_port);
6486
6487         laddr->sin_addr.s_addr = htonl(params->cm_info->local_ip[0]);
6488         raddr->sin_addr.s_addr = htonl(params->cm_info->remote_ip[0]);
6489
6490         event.provider_data = (void *)ep;
6491         event.private_data = (void *)params->cm_info->private_data;
6492         event.private_data_len = (u8)params->cm_info->private_data_len;
6493
6494 #if __FreeBSD_version >= 1100000
6495         event.ord = params->cm_info->ord;
6496         event.ird = params->cm_info->ird;
6497 #endif /* #if __FreeBSD_version >= 1100000 */
6498
6499         listener->cm_id->event_handler(listener->cm_id, &event);
6500
6501         QL_DPRINT12(ha, "exit\n");
6502
6503         return;
6504 }
6505
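/*
 * Descriptive note (added): helper that builds an iw_cm_event from the
 * ecore CM parameters and delivers it through the endpoint's cm_id event
 * handler, if one is attached.
 */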
6506 static void
6507 qlnxr_iw_issue_event(void *context,
6508          struct ecore_iwarp_cm_event_params *params,
6509          enum iw_cm_event_type event_type,
6510          char *str)
6511 {
6512         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6513         struct qlnxr_dev *dev = ep->dev;
6514         struct iw_cm_event event;
6515         qlnx_host_t     *ha;
6516
6517         ha = dev->ha;
6518
6519         QL_DPRINT12(ha, "enter\n");
6520
6521         memset(&event, 0, sizeof(event));
6522         event.status = params->status;
6523         event.event = event_type;
6524
6525         if (params->cm_info != NULL) {
6526 #if __FreeBSD_version >= 1100000
6527                 event.ird = params->cm_info->ird;
6528                 event.ord = params->cm_info->ord;
6529                 QL_DPRINT12(ha, "ord=[%d] \n", event.ord);
6530                 QL_DPRINT12(ha, "ird=[%d] \n", event.ird);
6531 #endif /* #if __FreeBSD_version >= 1100000 */
6532
6533                 event.private_data_len = params->cm_info->private_data_len;
6534                 event.private_data = (void *)params->cm_info->private_data;
6535                 QL_DPRINT12(ha, "private_data_len=[%d] \n",
6536                         event.private_data_len);
6537         }
6538
6539         QL_DPRINT12(ha, "event=[%d] %s\n", event.event, str);
6540         QL_DPRINT12(ha, "status=[%d] \n", event.status);
6541
6542         if (ep) {
6543                 if (ep->cm_id)
6544                         ep->cm_id->event_handler(ep->cm_id, &event);
6545                 else
6546                         QL_DPRINT11(ha, "ep->cm_id == NULL \n");
6547         } else {
6548                 QL_DPRINT11(ha, "ep == NULL \n");
6549         }
6550
6551         QL_DPRINT12(ha, "exit\n");
6552
6553         return;
6554 }
6555
6556 static void
6557 qlnxr_iw_close_event(void *context,
6558          struct ecore_iwarp_cm_event_params *params)
6559 {
6560         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6561         struct qlnxr_dev *dev = ep->dev;
6562         qlnx_host_t     *ha;
6563
6564         ha = dev->ha;
6565
6566         QL_DPRINT12(ha, "enter\n");
6567
6568         if (ep->cm_id) {
6569                 qlnxr_iw_issue_event(context,
6570                                     params,
6571                                     IW_CM_EVENT_CLOSE,
6572                                     "IW_CM_EVENT_CLOSE");
6573                 ep->cm_id->rem_ref(ep->cm_id);
6574                 ep->cm_id = NULL;
6575         }
6576
6577         QL_DPRINT12(ha, "exit\n");
6578
6579         return;
6580 }
6581
6582 #if __FreeBSD_version >= 1102000
6583
6584 static void
6585 qlnxr_iw_passive_complete(void *context,
6586         struct ecore_iwarp_cm_event_params *params)
6587 {
6588         struct qlnxr_iw_ep      *ep = (struct qlnxr_iw_ep *)context;
6589         struct qlnxr_dev        *dev = ep->dev;
6590         qlnx_host_t             *ha;
6591
6592         ha = dev->ha;
6593
6594         /* We will only reach the following state if MPA_REJECT was called on
6595          * passive. In this case there will be no associated QP.
6596          */
6597         if ((params->status == -ECONNREFUSED) && (ep->qp == NULL)) {
6598                 QL_DPRINT11(ha, "PASSIVE connection refused releasing ep...\n");
6599                 kfree(ep);
6600                 return;
6601         }
6602
6603         /* We always issue an ESTABLISHED event; however, OFED does not look
6604          * at the event code for ESTABLISHED, so if there was a failure we
6605          * follow up with a CLOSE event.
6606          */
6607         qlnxr_iw_issue_event(context,
6608                 params,
6609                 IW_CM_EVENT_ESTABLISHED,
6610                 "IW_CM_EVENT_ESTABLISHED");
6611
6612         if (params->status < 0) {
6613                 qlnxr_iw_close_event(context, params);
6614         }
6615
6616         return;
6617 }
6618
6619 struct qlnxr_discon_work {
6620         struct work_struct work;
6621         struct qlnxr_iw_ep *ep;
6622         enum ecore_iwarp_event_type event;
6623         int status;
6624 };
6625
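/*
 * Descriptive note (added): disconnect handling is deferred to the iwarp
 * workqueue.  The worker raises IW_CM_EVENT_DISCONNECT and then modifies
 * the QP to SQD (graceful) or ERR (abortive) depending on the event status.
 */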
6626 static void
6627 qlnxr_iw_disconnect_worker(struct work_struct *work)
6628 {
6629         struct qlnxr_discon_work *dwork =
6630                 container_of(work, struct qlnxr_discon_work, work);
6631         struct ecore_rdma_modify_qp_in_params qp_params = { 0 };
6632         struct qlnxr_iw_ep *ep = dwork->ep;
6633         struct qlnxr_dev *dev = ep->dev;
6634         struct qlnxr_qp *qp = ep->qp;
6635         struct iw_cm_event event;
6636
6637         if (qp->destroyed) {
6638                 kfree(dwork);
6639                 qlnxr_iw_qp_rem_ref(&qp->ibqp);
6640                 return;
6641         }
6642
6643         memset(&event, 0, sizeof(event));
6644         event.status = dwork->status;
6645         event.event = IW_CM_EVENT_DISCONNECT;
6646
6647         /* Success means a graceful disconnect was requested; modifying to
6648          * SQD is translated to a graceful disconnect. Otherwise a reset is sent.
6649          */
6650         if (dwork->status)
6651                 qp_params.new_state = ECORE_ROCE_QP_STATE_ERR;
6652         else
6653                 qp_params.new_state = ECORE_ROCE_QP_STATE_SQD;
6654
6655         kfree(dwork);
6656
6657         if (ep->cm_id)
6658                 ep->cm_id->event_handler(ep->cm_id, &event);
6659
6660         SET_FIELD(qp_params.modify_flags,
6661                   ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
6662
6663         ecore_rdma_modify_qp(dev->rdma_ctx, qp->ecore_qp, &qp_params);
6664
6665         qlnxr_iw_qp_rem_ref(&qp->ibqp);
6666
6667         return;
6668 }
6669
6670 void
6671 qlnxr_iw_disconnect_event(void *context,
6672         struct ecore_iwarp_cm_event_params *params)
6673 {
6674         struct qlnxr_discon_work *work;
6675         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6676         struct qlnxr_dev *dev = ep->dev;
6677         struct qlnxr_qp *qp = ep->qp;
6678
6679         work = kzalloc(sizeof(*work), GFP_ATOMIC);
6680         if (!work)
6681                 return;
6682
6683         qlnxr_iw_qp_add_ref(&qp->ibqp);
6684         work->ep = ep;
6685         work->event = params->event;
6686         work->status = params->status;
6687
6688         INIT_WORK(&work->work, qlnxr_iw_disconnect_worker);
6689         queue_work(dev->iwarp_wq, &work->work);
6690
6691         return;
6692 }
6693
6694 #endif /* #if __FreeBSD_version >= 1102000 */
6695
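/*
 * Descriptive note (added): active-side MPA reply handling.  When the
 * peer's MPA reply arrives, forward an RTR request for the endpoint via
 * ecore_iwarp_send_rtr() to complete connection setup.
 */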
6696 static int
6697 qlnxr_iw_mpa_reply(void *context,
6698         struct ecore_iwarp_cm_event_params *params)
6699 {
6700         struct qlnxr_iw_ep      *ep = (struct qlnxr_iw_ep *)context;
6701         struct qlnxr_dev        *dev = ep->dev;
6702         struct ecore_iwarp_send_rtr_in rtr_in;
6703         int                     rc;
6704         qlnx_host_t             *ha;
6705
6706         ha = dev->ha;
6707
6708         QL_DPRINT12(ha, "enter\n");
6709
6710         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6711                 return -EINVAL;
6712
6713         bzero(&rtr_in, sizeof(struct ecore_iwarp_send_rtr_in));
6714         rtr_in.ep_context = params->ep_context;
6715
6716         rc = ecore_iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
6717
6718         QL_DPRINT12(ha, "exit rc = %d\n", rc);
6719         return rc;
6720 }
6721
6722 void
6723 qlnxr_iw_qp_event(void *context,
6724         struct ecore_iwarp_cm_event_params *params,
6725         enum ib_event_type ib_event,
6726         char *str)
6727 {
6728         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6729         struct qlnxr_dev *dev = ep->dev;
6730         struct ib_qp *ibqp = &(ep->qp->ibqp);
6731         struct ib_event event;
6732         qlnx_host_t     *ha;
6733
6734         ha = dev->ha;
6735
6736         QL_DPRINT12(ha,
6737                 "[context, event, str, event_handler] = [%p, 0x%x, %s, %p] enter\n",
6738                 context, params->event, str, ibqp->event_handler);
6739
6740         if (ibqp->event_handler) {
6741                 event.event = ib_event;
6742                 event.device = ibqp->device;
6743                 event.element.qp = ibqp;
6744                 ibqp->event_handler(&event, ibqp->qp_context);
6745         }
6746
6747         return;
6748 }
6749
6750 int
6751 qlnxr_iw_event_handler(void *context,
6752         struct ecore_iwarp_cm_event_params *params)
6753 {
6754         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6755         struct qlnxr_dev *dev = ep->dev;
6756         qlnx_host_t     *ha;
6757
6758         ha = dev->ha;
6759
6760         QL_DPRINT12(ha, "[context, event] = [%p, 0x%x] "
6761                 "enter\n", context, params->event);
6762
6763         switch (params->event) {
6764         /* Passive side request received */
6765         case ECORE_IWARP_EVENT_MPA_REQUEST:
6766                 qlnxr_iw_mpa_request(context, params);
6767                 break;
6768
6769         case ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY:
6770                 qlnxr_iw_mpa_reply(context, params);
6771                 break;
6772
6773         /* Passive side established (ack on MPA response) */
6774         case ECORE_IWARP_EVENT_PASSIVE_COMPLETE:
6775
6776 #if __FreeBSD_version >= 1102000
6777
6778                 ep->during_connect = 0;
6779                 qlnxr_iw_passive_complete(context, params);
6780
6781 #else
6782                 qlnxr_iw_issue_event(context,
6783                                     params,
6784                                     IW_CM_EVENT_ESTABLISHED,
6785                                     "IW_CM_EVENT_ESTABLISHED");
6786 #endif /* #if __FreeBSD_version >= 1102000 */
6787                 break;
6788
6789         /* Active side reply received */
6790         case ECORE_IWARP_EVENT_ACTIVE_COMPLETE:
6791                 ep->during_connect = 0;
6792                 qlnxr_iw_issue_event(context,
6793                                     params,
6794                                     IW_CM_EVENT_CONNECT_REPLY,
6795                                     "IW_CM_EVENT_CONNECT_REPLY");
6796                 if (params->status < 0) {
6797                         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6798
6799                         ep->cm_id->rem_ref(ep->cm_id);
6800                         ep->cm_id = NULL;
6801                 }
6802                 break;
6803
6804         case ECORE_IWARP_EVENT_DISCONNECT:
6805
6806 #if __FreeBSD_version >= 1102000
6807                 qlnxr_iw_disconnect_event(context, params);
6808 #else
6809                 qlnxr_iw_issue_event(context,
6810                                     params,
6811                                     IW_CM_EVENT_DISCONNECT,
6812                                     "IW_CM_EVENT_DISCONNECT");
6813                 qlnxr_iw_close_event(context, params);
6814 #endif /* #if __FreeBSD_version >= 1102000 */
6815                 break;
6816
6817         case ECORE_IWARP_EVENT_CLOSE:
6818                 ep->during_connect = 0;
6819                 qlnxr_iw_close_event(context, params);
6820                 break;
6821
6822         case ECORE_IWARP_EVENT_RQ_EMPTY:
6823                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6824                                  "IWARP_EVENT_RQ_EMPTY");
6825                 break;
6826
6827         case ECORE_IWARP_EVENT_IRQ_FULL:
6828                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6829                                  "IWARP_EVENT_IRQ_FULL");
6830                 break;
6831
6832         case ECORE_IWARP_EVENT_LLP_TIMEOUT:
6833                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6834                                  "IWARP_EVENT_LLP_TIMEOUT");
6835                 break;
6836
6837         case ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
6838                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
6839                                  "IWARP_EVENT_REMOTE_PROTECTION_ERROR");
6840                 break;
6841
6842         case ECORE_IWARP_EVENT_CQ_OVERFLOW:
6843                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6844                                  "QED_IWARP_EVENT_CQ_OVERFLOW");
6845                 break;
6846
6847         case ECORE_IWARP_EVENT_QP_CATASTROPHIC:
6848                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6849                                  "QED_IWARP_EVENT_QP_CATASTROPHIC");
6850                 break;
6851
6852         case ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR:
6853                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
6854                                  "IWARP_EVENT_LOCAL_ACCESS_ERROR");
6855                 break;
6856
6857         case ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR:
6858                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6859                                  "IWARP_EVENT_REMOTE_OPERATION_ERROR");
6860                 break;
6861
6862         case ECORE_IWARP_EVENT_TERMINATE_RECEIVED:
6863                 QL_DPRINT12(ha, "Got terminate message"
6864                         " ECORE_IWARP_EVENT_TERMINATE_RECEIVED\n");
6865                 break;
6866
6867         default:
6868                 QL_DPRINT12(ha,
6869                         "Unknown event [0x%x] received \n", params->event);
6870                 break;
6871         }
6872
6873         QL_DPRINT12(ha, "[context, event] = [%p, 0x%x] "
6874                 "exit\n", context, params->event);
6875         return 0;
6876 }
6877
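/*
 * Descriptive note (added): resolve the destination IPv4 address to a MAC
 * address via the kernel ARP code; the arpresolve() signature differs
 * across __FreeBSD_version, hence the two call variants below.
 */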
6878 static int
6879 qlnxr_addr4_resolve(struct qlnxr_dev *dev,
6880                               struct sockaddr_in *src_in,
6881                               struct sockaddr_in *dst_in,
6882                               u8 *dst_mac)
6883 {
6884         int rc;
6885
6886 #if __FreeBSD_version >= 1100000
6887         rc = arpresolve(dev->ha->ifp, 0, NULL, (struct sockaddr *)dst_in,
6888                         dst_mac, NULL, NULL);
6889 #else
6890         struct llentry *lle;
6891
6892         rc = arpresolve(dev->ha->ifp, NULL, NULL, (struct sockaddr *)dst_in,
6893                         dst_mac, &lle);
6894 #endif
6895
6896         QL_DPRINT12(dev->ha, "rc = %d "
6897                 "sa_len = 0x%x sa_family = 0x%x IP Address = %d.%d.%d.%d "
6898                 "Dest MAC %02x:%02x:%02x:%02x:%02x:%02x\n", rc,
6899                 dst_in->sin_len, dst_in->sin_family,
6900                 NIPQUAD((dst_in->sin_addr.s_addr)),
6901                 dst_mac[0], dst_mac[1], dst_mac[2],
6902                 dst_mac[3], dst_mac[4], dst_mac[5]);
6903
6904         return rc;
6905 }
6906
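/*
 * Descriptive note (added): active connect path.  Look up the QP, resolve
 * the peer MAC, fill an ecore_iwarp_connect_in structure with the
 * IPv4/port/ORD/IRD parameters and hand the connection off to
 * ecore_iwarp_connect().  The MSS is the interface MTU minus 40 bytes,
 * which presumably accounts for the IPv4 + TCP header overhead.
 */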
6907 int
6908 qlnxr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
6909 {
6910         struct qlnxr_dev *dev;
6911         struct ecore_iwarp_connect_out out_params;
6912         struct ecore_iwarp_connect_in in_params;
6913         struct qlnxr_iw_ep *ep;
6914         struct qlnxr_qp *qp;
6915         struct sockaddr_in *laddr;
6916         struct sockaddr_in *raddr;
6917         int rc = 0;
6918         qlnx_host_t     *ha;
6919
6920         dev = get_qlnxr_dev((cm_id->device));
6921         ha = dev->ha;
6922
6923         QL_DPRINT12(ha, "[cm_id, conn_param] = [%p, %p] "
6924                 "enter \n", cm_id, conn_param);
6925
6926         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6927                 return -EINVAL;
6928
6929         qp = idr_find(&dev->qpidr, conn_param->qpn);
6930
6931         laddr = (struct sockaddr_in *)&cm_id->local_addr;
6932         raddr = (struct sockaddr_in *)&cm_id->remote_addr;
6933
6934         QL_DPRINT12(ha,
6935                 "local = [%d.%d.%d.%d, %d] remote = [%d.%d.%d.%d, %d]\n",
6936                 NIPQUAD((laddr->sin_addr.s_addr)), laddr->sin_port,
6937                 NIPQUAD((raddr->sin_addr.s_addr)), raddr->sin_port);
6938
6939         ep = kzalloc(sizeof(*ep), GFP_KERNEL);
6940         if (!ep) {
6941                 QL_DPRINT11(ha, "struct qlnxr_iw_ep "
6942                         "memory allocation failed\n");
6943                 return -ENOMEM;
6944         }
6945
6946         ep->dev = dev;
6947         ep->qp = qp;
6948         cm_id->add_ref(cm_id);
6949         ep->cm_id = cm_id;
6950
6951         memset(&in_params, 0, sizeof (struct ecore_iwarp_connect_in));
6952         memset(&out_params, 0, sizeof (struct ecore_iwarp_connect_out));
6953
6954         in_params.event_cb = qlnxr_iw_event_handler;
6955         in_params.cb_context = ep;
6956
6957         in_params.cm_info.ip_version = ECORE_TCP_IPV4;
6958
6959         in_params.cm_info.remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
6960         in_params.cm_info.local_ip[0] = ntohl(laddr->sin_addr.s_addr);
6961         in_params.cm_info.remote_port = ntohs(raddr->sin_port);
6962         in_params.cm_info.local_port = ntohs(laddr->sin_port);
6963         in_params.cm_info.vlan = 0;
6964         in_params.mss = dev->ha->ifp->if_mtu - 40;
6965
6966         QL_DPRINT12(ha, "remote_ip = [%d.%d.%d.%d] "
6967                 "local_ip = [%d.%d.%d.%d] remote_port = %d local_port = %d "
6968                 "vlan = %d\n",
6969                 NIPQUAD((in_params.cm_info.remote_ip[0])),
6970                 NIPQUAD((in_params.cm_info.local_ip[0])),
6971                 in_params.cm_info.remote_port, in_params.cm_info.local_port,
6972                 in_params.cm_info.vlan);
6973
6974         rc = qlnxr_addr4_resolve(dev, laddr, raddr, (u8 *)in_params.remote_mac_addr);
6975
6976         if (rc) {
6977                 QL_DPRINT11(ha, "qlnxr_addr4_resolve failed\n");
6978                 goto err;
6979         }
6980
6981         QL_DPRINT12(ha, "ord = %d ird=%d private_data=%p"
6982                 " private_data_len=%d rq_psn=%d\n",
6983                 conn_param->ord, conn_param->ird, conn_param->private_data,
6984                 conn_param->private_data_len, qp->rq_psn);
6985
6986         in_params.cm_info.ord = conn_param->ord;
6987         in_params.cm_info.ird = conn_param->ird;
6988         in_params.cm_info.private_data = conn_param->private_data;
6989         in_params.cm_info.private_data_len = conn_param->private_data_len;
6990         in_params.qp = qp->ecore_qp;
6991
6992         memcpy(in_params.local_mac_addr, dev->ha->primary_mac, ETH_ALEN);
6993
6994         rc = ecore_iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
6995
6996         if (rc) {
6997                 QL_DPRINT12(ha, "ecore_iwarp_connect failed\n");
6998                 goto err;
6999         }
7000
7001         QL_DPRINT12(ha, "exit\n");
7002
7003         return rc;
7004
7005 err:
7006         cm_id->rem_ref(cm_id);
7007         kfree(ep);
7008
7009         QL_DPRINT12(ha, "exit [%d]\n", rc);
7010         return rc;
7011 }
7012
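/*
 * Descriptive note (added): passive side.  Create an ecore iWARP listener
 * bound to the cm_id's local IPv4 address and port; incoming MPA requests
 * are then delivered via qlnxr_iw_event_handler().
 */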
7013 int
7014 qlnxr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
7015 {
7016         struct qlnxr_dev *dev;
7017         struct qlnxr_iw_listener *listener;
7018         struct ecore_iwarp_listen_in iparams;
7019         struct ecore_iwarp_listen_out oparams;
7020         struct sockaddr_in *laddr;
7021         qlnx_host_t     *ha;
7022         int rc;
7023
7024         dev = get_qlnxr_dev((cm_id->device));
7025         ha = dev->ha;
7026
7027         QL_DPRINT12(ha, "enter\n");
7028
7029         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
7030                 return -EINVAL;
7031
7032         laddr = (struct sockaddr_in *)&cm_id->local_addr;
7033
7034         listener = kzalloc(sizeof(*listener), GFP_KERNEL);
7035
7036         if (listener == NULL) {
7037                 QL_DPRINT11(ha, "listener memory alloc failed\n");
7038                 return -ENOMEM;
7039         }
7040
7041         listener->dev = dev;
7042         cm_id->add_ref(cm_id);
7043         listener->cm_id = cm_id;
7044         listener->backlog = backlog;
7045
7046         memset(&iparams, 0, sizeof (struct ecore_iwarp_listen_in));
7047         memset(&oparams, 0, sizeof (struct ecore_iwarp_listen_out));
7048
7049         iparams.cb_context = listener;
7050         iparams.event_cb = qlnxr_iw_event_handler;
7051         iparams.max_backlog = backlog;
7052
7053         iparams.ip_version = ECORE_TCP_IPV4;
7054
7055         iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
7056         iparams.port = ntohs(laddr->sin_port);
7057         iparams.vlan = 0;
7058
7059         QL_DPRINT12(ha, "[%d.%d.%d.%d, %d] iparams.port=%d\n",
7060                 NIPQUAD((laddr->sin_addr.s_addr)),
7061                 laddr->sin_port, iparams.port);
7062
7063         rc = ecore_iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
7064         if (rc) {
7065                 QL_DPRINT11(ha,
7066                         "ecore_iwarp_create_listen failed rc = %d\n", rc);
7067                 goto err;
7068         }
7069
7070         listener->ecore_handle = oparams.handle;
7071         cm_id->provider_data = listener;
7072
7073         QL_DPRINT12(ha, "exit\n");
7074         return rc;
7075
7076 err:
7077         cm_id->rem_ref(cm_id);
7078         kfree(listener);
7079
7080         QL_DPRINT12(ha, "exit [%d]\n", rc);
7081         return rc;
7082 }
7083
7084 void
7085 qlnxr_iw_destroy_listen(struct iw_cm_id *cm_id)
7086 {
7087         struct qlnxr_iw_listener *listener = cm_id->provider_data;
7088         struct qlnxr_dev *dev = get_qlnxr_dev((cm_id->device));
7089         int rc = 0;
7090         qlnx_host_t     *ha;
7091
7092         ha = dev->ha;
7093
7094         QL_DPRINT12(ha, "enter\n");
7095
7096         if (listener->ecore_handle)
7097                 rc = ecore_iwarp_destroy_listen(dev->rdma_ctx,
7098                                 listener->ecore_handle);
7099
7100         cm_id->rem_ref(cm_id);
7101
7102         QL_DPRINT12(ha, "exit [%d]\n", rc);
7103         return;
7104 }
7105
7106 int
7107 qlnxr_iw_accept(struct iw_cm_id *cm_id,
7108         struct iw_cm_conn_param *conn_param)
7109 {
7110         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)cm_id->provider_data;
7111         struct qlnxr_dev *dev = ep->dev;
7112         struct qlnxr_qp *qp;
7113         struct ecore_iwarp_accept_in params;
7114         int rc;
7115         qlnx_host_t     *ha;
7116
7117         ha = dev->ha;
7118
7119         QL_DPRINT12(ha, "enter  qpid=%d\n", conn_param->qpn);
7120
7121         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
7122                 return -EINVAL;
7123
7124         qp = idr_find(&dev->qpidr, conn_param->qpn);
7125         if (!qp) {
7126                 QL_DPRINT11(ha, "idr_find failed invalid qpn = %d\n",
7127                         conn_param->qpn);
7128                 return -EINVAL;
7129         }
7130         ep->qp = qp;
7131         qp->ep = ep;
7132         cm_id->add_ref(cm_id);
7133         ep->cm_id = cm_id;
7134
7135         params.ep_context = ep->ecore_context;
7136         params.cb_context = ep;
7137         params.qp = ep->qp->ecore_qp;
7138         params.private_data = conn_param->private_data;
7139         params.private_data_len = conn_param->private_data_len;
7140         params.ird = conn_param->ird;
7141         params.ord = conn_param->ord;
7142
7143         rc = ecore_iwarp_accept(dev->rdma_ctx, &params);
7144         if (rc) {
7145                 QL_DPRINT11(ha, "ecore_iwarp_accept failed %d\n", rc);
7146                 goto err;
7147         }
7148
7149         QL_DPRINT12(ha, "exit\n");
7150         return 0;
7151 err:
7152         cm_id->rem_ref(cm_id);
7153         QL_DPRINT12(ha, "exit rc = %d\n", rc);
7154         return rc;
7155 }
7156
7157 int
7158 qlnxr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
7159 {
7160 #if __FreeBSD_version >= 1102000
7161
7162         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)cm_id->provider_data;
7163         struct qlnxr_dev *dev = ep->dev;
7164         struct ecore_iwarp_reject_in params;
7165         int rc;
7166
7167         params.ep_context = ep->ecore_context;
7168         params.cb_context = ep;
7169         params.private_data = pdata;
7170         params.private_data_len = pdata_len;
7171         ep->qp = NULL;
7172
7173         rc = ecore_iwarp_reject(dev->rdma_ctx, &params);
7174
7175         return rc;
7176
7177 #else
7178
7179         printf("iWARP reject_cr not implemented\n");
7180         return -EINVAL;
7181
7182 #endif /* #if __FreeBSD_version >= 1102000 */
7183 }
7184
7185 void
7186 qlnxr_iw_qp_add_ref(struct ib_qp *ibqp)
7187 {
7188         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
7189         qlnx_host_t     *ha;
7190
7191         ha = qp->dev->ha;
7192
7193         QL_DPRINT12(ha, "enter ibqp = %p\n", ibqp);
7194
7195         atomic_inc(&qp->refcnt);
7196
7197         QL_DPRINT12(ha, "exit \n");
7198         return;
7199 }
7200
7201 void
7202 qlnxr_iw_qp_rem_ref(struct ib_qp *ibqp)
7203 {
7204         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
7205         qlnx_host_t     *ha;
7206
7207         ha = qp->dev->ha;
7208
7209         QL_DPRINT12(ha, "enter ibqp = %p qp = %p\n", ibqp, qp);
7210
7211         if (atomic_dec_and_test(&qp->refcnt)) {
7212                 qlnxr_idr_remove(qp->dev, qp->qp_id);
7213                 kfree(qp);
7214         }
7215
7216         QL_DPRINT12(ha, "exit \n");
7217         return;
7218 }
7219
7220 struct ib_qp *
7221 qlnxr_iw_get_qp(struct ib_device *ibdev, int qpn)
7222 {
7223         struct qlnxr_dev *dev = get_qlnxr_dev(ibdev);
7224         struct ib_qp *qp;
7225         qlnx_host_t     *ha;
7226
7227         ha = dev->ha;
7228
7229         QL_DPRINT12(ha, "enter dev = %p ibdev = %p qpn = %d\n", dev, ibdev, qpn);
7230
7231         qp = idr_find(&dev->qpidr, qpn);
7232
7233         QL_DPRINT12(ha, "exit qp = %p\n", qp);
7234
7235         return (qp);
7236 }