1 /*
2  * Copyright (c) 2018-2019 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27
28
29 /*
30  * File: qlnxr_verbs.c
31  */
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include "qlnxr_def.h"
36 #include "rdma_common.h"
37 #include "qlnxr_roce.h"
38 #include "qlnxr_cm.h"
39
40 #define upper_32_bits(x) ((uint32_t)((x) >> 32))
41 #define lower_32_bits(x) ((uint32_t)(x))
42 #define HILO_U64(hi, lo)                ((((u64)(hi)) << 32) + (lo))
43
44 #define TYPEPTR_ADDR_SET(type_ptr, field, vaddr)                        \
45         do {                                                            \
46                 (type_ptr)->field.hi = cpu_to_le32(upper_32_bits(vaddr));\
47                 (type_ptr)->field.lo = cpu_to_le32(lower_32_bits(vaddr));\
48         } while (0)
49
50
51 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)                 \
52         do {                                                    \
53                 TYPEPTR_ADDR_SET(sge, addr, vaddr);             \
54                 (sge)->length = cpu_to_le32(vlength);           \
55                 (sge)->flags = cpu_to_le32(vflags);             \
56         } while (0)
57
58 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)                       \
59         do {                                                    \
60                 TYPEPTR_ADDR_SET(hdr, wr_id, vwr_id);           \
61                 (hdr)->num_sges = num_sge;                      \
62         } while (0)
63
64 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)                 \
65         do {                                                    \
66                 TYPEPTR_ADDR_SET(sge, addr, vaddr);             \
67                 (sge)->length = cpu_to_le32(vlength);           \
68                 (sge)->l_key = cpu_to_le32(vlkey);              \
69         } while (0)
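/*
 * Illustrative use of the helpers above (hypothetical values, not part of the
 * driver): SRQ_SGE_SET() splits a 64-bit DMA address into the little-endian
 * hi/lo pair used by the hardware, e.g. for address 0x0000000123456780 and a
 * 256-byte buffer:
 *
 *      struct rdma_srq_sge sge;
 *      SRQ_SGE_SET(&sge, 0x0000000123456780ULL, 256, lkey);
 *      // sge.addr.hi == cpu_to_le32(0x00000001)
 *      // sge.addr.lo == cpu_to_le32(0x23456780)
 *      // sge.length  == cpu_to_le32(256), sge.l_key == cpu_to_le32(lkey)
 */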
70
71 #define NIPQUAD(addr) \
72         ((unsigned char *)&addr)[0], \
73         ((unsigned char *)&addr)[1], \
74         ((unsigned char *)&addr)[2], \
75         ((unsigned char *)&addr)[3]
76
77 static int
78 qlnxr_check_srq_params(struct ib_pd *ibpd,
79         struct qlnxr_dev *dev,
80         struct ib_srq_init_attr *attrs);
81
82 static int
83 qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
84         struct qlnxr_srq *srq,
85         struct qlnxr_create_srq_ureq *ureq,
86         int access, int dmasync);
87
88 static int
89 qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
90         struct qlnxr_dev *dev,
91         struct ib_srq_init_attr *init_attr);
92
93
94 static int
95 qlnxr_copy_srq_uresp(struct qlnxr_dev *dev,
96         struct qlnxr_srq *srq,
97         struct ib_udata *udata);
98
99 static void
100 qlnxr_free_srq_user_params(struct qlnxr_srq *srq);
101
102 static void
103 qlnxr_free_srq_kernel_params(struct qlnxr_srq *srq);
104
105
106 static u32
107 qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq);
108
109 int
110 qlnxr_iw_query_gid(struct ib_device *ibdev, u8 port, int index,
111         union ib_gid *sgid)
112 {
113         struct qlnxr_dev        *dev;
114         qlnx_host_t             *ha;
115
116         dev = get_qlnxr_dev(ibdev);
117         ha = dev->ha;
118
119         QL_DPRINT12(ha, "enter\n");
120
121         memset(sgid->raw, 0, sizeof(sgid->raw));
122
123         memcpy(sgid->raw, dev->ha->primary_mac, sizeof (dev->ha->primary_mac));
124
125         QL_DPRINT12(ha, "exit\n");
126
127         return 0;
128 }
129
130 int
131 qlnxr_query_gid(struct ib_device *ibdev, u8 port, int index,
132         union ib_gid *sgid)
133 {
134         struct qlnxr_dev        *dev;
135         qlnx_host_t             *ha;
136
137         dev = get_qlnxr_dev(ibdev);
138         ha = dev->ha;
139         QL_DPRINT12(ha, "enter index: %d\n", index);
140 #if 0
141         int ret = 0;
142         /* @@@: if DEFINE_ROCE_GID_TABLE to be used here */
143         //if (!rdma_cap_roce_gid_table(ibdev, port)) {
144         if (!(rdma_protocol_roce(ibdev, port) &&
145                 ibdev->add_gid && ibdev->del_gid)) {
146                 QL_DPRINT11(ha, "acquire gid failed\n");
147                 return -ENODEV;
148         }
149
150         ret = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
151         if (ret == -EAGAIN) {
152                 memcpy(sgid, &zgid, sizeof(*sgid));
153                 return 0;
154         }
155 #endif
156         if ((index >= QLNXR_MAX_SGID) || (index < 0)) {
157                 QL_DPRINT12(ha, "invalid gid index %d\n", index);
158                 memset(sgid, 0, sizeof(*sgid));
159                 return -EINVAL;
160         }
161         memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
162
163         QL_DPRINT12(ha, "exit : %p\n", sgid);
164
165         return 0;
166 }
167
168 struct ib_srq *
169 qlnxr_create_srq(struct ib_pd *ibpd, struct ib_srq_init_attr *init_attr,
170         struct ib_udata *udata)
171 {
172         struct qlnxr_dev        *dev;
173         qlnx_host_t             *ha;
174         struct ecore_rdma_destroy_srq_in_params destroy_in_params;
175         struct ecore_rdma_create_srq_out_params out_params;
176         struct ecore_rdma_create_srq_in_params in_params;
177         u64 pbl_base_addr, phy_prod_pair_addr;
178         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
179         struct ib_ucontext *ib_ctx = NULL;
180         struct qlnxr_srq_hwq_info *hw_srq;
181         struct qlnxr_ucontext *ctx = NULL;
182         struct qlnxr_create_srq_ureq ureq;
183         u32 page_cnt, page_size;
184         struct qlnxr_srq *srq;
185         int ret = 0;
186
187         dev = get_qlnxr_dev((ibpd->device));
188         ha = dev->ha;
189
190         QL_DPRINT12(ha, "enter\n");
191
192         ret = qlnxr_check_srq_params(ibpd, dev, init_attr);
193
194         srq = kzalloc(sizeof(*srq), GFP_KERNEL);
195         if (!srq) {
196                 QL_DPRINT11(ha, "cannot allocate memory for srq\n");
197                 return ERR_PTR(-ENOMEM);
198         }
199
200         srq->dev = dev;
201         hw_srq = &srq->hw_srq;
202         spin_lock_init(&srq->lock);
203         memset(&in_params, 0, sizeof(in_params));
204
205         if (udata && ibpd->uobject && ibpd->uobject->context) {
206                 ib_ctx = ibpd->uobject->context;
207                 ctx = get_qlnxr_ucontext(ib_ctx);
208
209                 memset(&ureq, 0, sizeof(ureq));
210                 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
211                         udata->inlen))) {
212                         QL_DPRINT11(ha, "problem"
213                                 " copying data from user space\n");
214                         goto err0;
215                 }
216
217                 ret = qlnxr_init_srq_user_params(ib_ctx, srq, &ureq, 0, 0);
218                 if (ret)
219                         goto err0;
220
221                 page_cnt = srq->usrq.pbl_info.num_pbes;
222                 pbl_base_addr = srq->usrq.pbl_tbl->pa;
223                 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
224                 // @@@ : if DEFINE_IB_UMEM_PAGE_SHIFT
225                 // page_size = BIT(srq->usrq.umem->page_shift);
226                 // else
227                 page_size = srq->usrq.umem->page_size;
228         } else {
229                 struct ecore_chain *pbl;
230                 ret = qlnxr_alloc_srq_kernel_params(srq, dev, init_attr);
231                 if (ret)
232                         goto err0;
233                 pbl = &hw_srq->pbl;
234
235                 page_cnt = ecore_chain_get_page_cnt(pbl);
236                 pbl_base_addr = ecore_chain_get_pbl_phys(pbl);
237                 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
238                 page_size = pbl->elem_per_page << 4;
239         }
240
241         in_params.pd_id = pd->pd_id;
242         in_params.pbl_base_addr = pbl_base_addr;
243         in_params.prod_pair_addr = phy_prod_pair_addr;
244         in_params.num_pages = page_cnt;
245         in_params.page_size = page_size;
246
247         ret = ecore_rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
248         if (ret)
249                 goto err1;
250
251         srq->srq_id = out_params.srq_id;
252
253         if (udata) {
254                 ret = qlnxr_copy_srq_uresp(dev, srq, udata);
255                 if (ret)
256                         goto err2;
257         }
258
259         QL_DPRINT12(ha, "created srq with srq_id = 0x%0x\n", srq->srq_id);
260         return &srq->ibsrq;
261 err2:
262         memset(&destroy_in_params, 0, sizeof(destroy_in_params));
263         destroy_in_params.srq_id = srq->srq_id;
264         ecore_rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
265
266 err1:
267         if (udata)
268                 qlnxr_free_srq_user_params(srq);
269         else
270                 qlnxr_free_srq_kernel_params(srq);
271
272 err0:
273         kfree(srq);     
274         return ERR_PTR(-EFAULT);
275 }
276
277 int
278 qlnxr_destroy_srq(struct ib_srq *ibsrq)
279 {
280         struct qlnxr_dev        *dev;
281         struct qlnxr_srq        *srq;
282         qlnx_host_t             *ha;
283         struct ecore_rdma_destroy_srq_in_params in_params;
284
285         srq = get_qlnxr_srq(ibsrq);
286         dev = srq->dev;
287         ha = dev->ha;
288
289         memset(&in_params, 0, sizeof(in_params));
290         in_params.srq_id = srq->srq_id;
291
292         ecore_rdma_destroy_srq(dev->rdma_ctx, &in_params);
293
294         if (ibsrq->pd->uobject && ibsrq->pd->uobject->context)
295                 qlnxr_free_srq_user_params(srq);
296         else
297                 qlnxr_free_srq_kernel_params(srq);
298
299         QL_DPRINT12(ha, "destroyed srq_id=0x%0x\n", srq->srq_id);
300         kfree(srq);
301         return 0;
302 }
303
304 int
305 qlnxr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
306         enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
307 {
308         struct qlnxr_dev        *dev;
309         struct qlnxr_srq        *srq;
310         qlnx_host_t             *ha;
311         struct ecore_rdma_modify_srq_in_params in_params;
312         int ret = 0;
313
314         srq = get_qlnxr_srq(ibsrq);
315         dev = srq->dev;
316         ha = dev->ha;
317
318         QL_DPRINT12(ha, "enter\n");
319         if (attr_mask & IB_SRQ_MAX_WR) {
320                 QL_DPRINT12(ha, "invalid attribute mask=0x%x"
321                         " specified for %p\n", attr_mask, srq);
322                 return -EINVAL;
323         }
324
325         if (attr_mask & IB_SRQ_LIMIT) {
326                 if (attr->srq_limit >= srq->hw_srq.max_wr) {
327                         QL_DPRINT12(ha, "invalid srq_limit=0x%x"
328                                 " (max_srq_limit = 0x%x)\n",
329                                attr->srq_limit, srq->hw_srq.max_wr);
330                         return -EINVAL; 
331                 }
332                 memset(&in_params, 0, sizeof(in_params));
333                 in_params.srq_id = srq->srq_id;
334                 in_params.wqe_limit = attr->srq_limit;
335                 ret = ecore_rdma_modify_srq(dev->rdma_ctx, &in_params);
336                 if (ret)
337                         return ret;
338         }
339
340         QL_DPRINT12(ha, "modified srq with srq_id = 0x%0x\n", srq->srq_id);
341         return 0;
342 }
343
344 int
345 qlnxr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
346 {
347         struct qlnxr_dev        *dev;
348         struct qlnxr_srq        *srq;
349         qlnx_host_t             *ha;
350         struct ecore_rdma_device *qattr;
351         srq = get_qlnxr_srq(ibsrq);
352         dev = srq->dev;
353         ha = dev->ha;
354         QL_DPRINT12(ha, "enter\n");
355
356         if (!dev->rdma_ctx) {
357                 QL_DPRINT12(ha, "called with invalid params"
358                         " rdma_ctx is NULL\n");
359                 return -EINVAL;
360         }
361
362         //qattr = &dev->attr;
363         qattr = ecore_rdma_query_device(dev->rdma_ctx);
364         srq_attr->srq_limit = qattr->max_srq;
365         srq_attr->max_wr = qattr->max_srq_wr;
366         srq_attr->max_sge = qattr->max_sge;
367
368         QL_DPRINT12(ha, "exit\n");
369         return 0;
370 }
371
372 /* Increment srq wr producer by one */
373 static
374 void qlnxr_inc_srq_wr_prod (struct qlnxr_srq_hwq_info *info)
375 {
376         info->wr_prod_cnt++;
377 }
378
379 /* Increment srq wr consumer by one */
380 static 
381 void qlnxr_inc_srq_wr_cons(struct qlnxr_srq_hwq_info *info)
382 {
383         info->wr_cons_cnt++;
384 }
385
386 /* get_port_immutable verb is not available in FreeBSD */
387 #if 0
388 int
389 qlnxr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
390         struct ib_port_immutable *immutable)
391 {
392         struct qlnxr_dev                *dev;
393         qlnx_host_t                     *ha;
394         dev = get_qlnxr_dev(ibdev);
395         ha = dev->ha;
396
397         QL_DPRINT12(ha, "entered but not implemented!!!\n");
398 }
399 #endif
400
401 int
402 qlnxr_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
403         struct ib_recv_wr **bad_wr)
404 {
405         struct qlnxr_dev        *dev;
406         struct qlnxr_srq        *srq;
407         qlnx_host_t             *ha;
408         struct qlnxr_srq_hwq_info *hw_srq;
409         struct ecore_chain *pbl;
410         unsigned long flags;
411         int status = 0;
412         u32 num_sge, offset;
413
414         srq = get_qlnxr_srq(ibsrq);
415         dev = srq->dev;
416         ha = dev->ha;
417         hw_srq = &srq->hw_srq;
418
419         QL_DPRINT12(ha, "enter\n");
420         spin_lock_irqsave(&srq->lock, flags);
421
422         pbl = &srq->hw_srq.pbl;
423         while (wr) {
424                 struct rdma_srq_wqe_header *hdr;
425                 int i;
426
427                 if (!qlnxr_srq_elem_left(hw_srq) ||
428                     wr->num_sge > srq->hw_srq.max_sges) {
429                         QL_DPRINT11(ha, "WR cannot be posted"
430                             " (%d, %d) || (%d > %d)\n",
431                             hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
432                             wr->num_sge, srq->hw_srq.max_sges);
433                         status = -ENOMEM;
434                         *bad_wr = wr;
435                         break;
436                 }
437
438                 hdr = ecore_chain_produce(pbl);
439                 num_sge = wr->num_sge;
440                 /* Set number of sge and WR id in header */
441                 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
442
443                 /* The PBL is maintained at WR granularity, so
444                  * increment the WR producer whenever a WR is posted.
445                  */
446                 qlnxr_inc_srq_wr_prod(hw_srq);
447                 hw_srq->wqe_prod++;
448                 hw_srq->sge_prod++;
449
450                 QL_DPRINT12(ha, "SRQ WR : SGEs: %d with wr_id[%d] = %llx\n",
451                         wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
452
453                 for (i = 0; i < wr->num_sge; i++) {
454                         struct rdma_srq_sge *srq_sge = 
455                             ecore_chain_produce(pbl);
456                         /* Set SGE length, lkey and address */
457                         SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
458                                 wr->sg_list[i].length, wr->sg_list[i].lkey);
459
460                         QL_DPRINT12(ha, "[%d]: len %d, key %x, addr %x:%x\n",
461                                 i, srq_sge->length, srq_sge->l_key,
462                                 srq_sge->addr.hi, srq_sge->addr.lo);
463                         hw_srq->sge_prod++;
464                 }
465                 wmb();
466                 /*
467                  * SRQ prod is 8 bytes. Need to update SGE prod in index
468                  * in first 4 bytes and need to update WQE prod in next
469                  * 4 bytes.
470                  */
471                 *(srq->hw_srq.virt_prod_pair_addr) = hw_srq->sge_prod;
472                 offset = offsetof(struct rdma_srq_producers, wqe_prod);
473                 *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
474                         hw_srq->wqe_prod;
475                 /* Flush prod after updating it */
476                 wmb();
477                 wr = wr->next;
478         }       
479
480         QL_DPRINT12(ha, "Elements in SRQ: %d\n",
481                 ecore_chain_get_elem_left(pbl));
482
483         spin_unlock_irqrestore(&srq->lock, flags);      
484         QL_DPRINT12(ha, "exit\n");
485         return status;
486 }
487
488 int
489 #if __FreeBSD_version < 1102000
490 qlnxr_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
491 #else
492 qlnxr_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
493         struct ib_udata *udata)
494 #endif /* #if __FreeBSD_version < 1102000 */
495
496 {
497         struct qlnxr_dev                *dev;
498         struct ecore_rdma_device        *qattr;
499         qlnx_host_t                     *ha;
500
501         dev = get_qlnxr_dev(ibdev);
502         ha = dev->ha;
503
504         QL_DPRINT12(ha, "enter\n");
505
506 #if __FreeBSD_version > 1102000
507         if (udata->inlen || udata->outlen)
508                 return -EINVAL;
509 #endif /* #if __FreeBSD_version > 1102000 */
510
511         if (dev->rdma_ctx == NULL) {
512                 return -EINVAL;
513         }
514
515         qattr = ecore_rdma_query_device(dev->rdma_ctx);
516
517         memset(attr, 0, sizeof *attr);
518
519         attr->fw_ver = qattr->fw_ver;
520         attr->sys_image_guid = qattr->sys_image_guid;
521         attr->max_mr_size = qattr->max_mr_size;
522         attr->page_size_cap = qattr->page_size_caps;
523         attr->vendor_id = qattr->vendor_id;
524         attr->vendor_part_id = qattr->vendor_part_id;
525         attr->hw_ver = qattr->hw_ver;
526         attr->max_qp = qattr->max_qp;
527         attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
528                                         IB_DEVICE_RC_RNR_NAK_GEN |
529                                         IB_DEVICE_LOCAL_DMA_LKEY |
530                                         IB_DEVICE_MEM_MGT_EXTENSIONS;
531
532         attr->max_sge = qattr->max_sge;
533         attr->max_sge_rd = qattr->max_sge;
534         attr->max_cq = qattr->max_cq;
535         attr->max_cqe = qattr->max_cqe;
536         attr->max_mr = qattr->max_mr;
537         attr->max_mw = qattr->max_mw;
538         attr->max_pd = qattr->max_pd;
539         attr->atomic_cap = dev->atomic_cap;
540         attr->max_fmr = qattr->max_fmr;
541         attr->max_map_per_fmr = 16; /* TBD: FMR */
542
543         /* There is an implicit assumption in some of the ib_xxx apps that the
544          * qp_rd_atom is smaller than the qp_init_rd_atom. Specifically, in
545          * communication the qp_rd_atom is passed to the other side and used as
546          * init_rd_atom without checking the device capabilities for init_rd_atom.
547          * For this reason, we set the qp_rd_atom to be the minimum of the
548          * two... There is an additional assumption in the mlx4 driver that the
549          * values are powers of two; fls is performed on the value - 1, which
550          * in fact gives a larger power of two for values that are not a power
551          * of two. This should be fixed in the mlx4 driver, but until then ->
552          * we provide a value that is a power of two in our code.
553          */
554         attr->max_qp_init_rd_atom =
555                 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
556         attr->max_qp_rd_atom =
557                 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
558                     attr->max_qp_init_rd_atom);
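        /*
         * Worked example (hypothetical value): if the device reports
         * max_qp_req_rd_atomic_resc = 5, then fls(5) = 3 and
         * max_qp_init_rd_atom = 1 << (3 - 1) = 4, i.e. the largest
         * power of two not exceeding the reported capability.
         */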
559
560         attr->max_srq = qattr->max_srq;
561         attr->max_srq_sge = qattr->max_srq_sge;
562         attr->max_srq_wr = qattr->max_srq_wr;
563
564         /* TODO: R&D to more properly configure the following */
565         attr->local_ca_ack_delay = qattr->dev_ack_delay;
566         attr->max_fast_reg_page_list_len = qattr->max_mr/8;
567         attr->max_pkeys = QLNXR_ROCE_PKEY_MAX;
568         attr->max_ah = qattr->max_ah;
569
570         QL_DPRINT12(ha, "exit\n");
571         return 0;
572 }
573
574 static inline void
575 get_link_speed_and_width(int speed, uint8_t *ib_speed, uint8_t *ib_width)
576 {
577         switch (speed) {
578         case 1000:
579                 *ib_speed = IB_SPEED_SDR;
580                 *ib_width = IB_WIDTH_1X;
581                 break;
582         case 10000:
583                 *ib_speed = IB_SPEED_QDR;
584                 *ib_width = IB_WIDTH_1X;
585                 break;
586
587         case 20000:
588                 *ib_speed = IB_SPEED_DDR;
589                 *ib_width = IB_WIDTH_4X;
590                 break;
591
592         case 25000:
593                 *ib_speed = IB_SPEED_EDR;
594                 *ib_width = IB_WIDTH_1X;
595                 break;
596
597         case 40000:
598                 *ib_speed = IB_SPEED_QDR;
599                 *ib_width = IB_WIDTH_4X;
600                 break;
601
602         case 50000:
603                 *ib_speed = IB_SPEED_QDR;
604                 *ib_width = IB_WIDTH_4X; // TODO doesn't add up to 50...
605                 break;
606
607         case 100000:
608                 *ib_speed = IB_SPEED_EDR;
609                 *ib_width = IB_WIDTH_4X;
610                 break;
611
612         default:
613                 /* Unsupported */
614                 *ib_speed = IB_SPEED_SDR;
615                 *ib_width = IB_WIDTH_1X;
616         }
617         return;
618 }
619
620 int
621 qlnxr_query_port(struct ib_device *ibdev, uint8_t port,
622         struct ib_port_attr *attr)
623 {
624         struct qlnxr_dev        *dev;
625         struct ecore_rdma_port  *rdma_port;
626         qlnx_host_t             *ha;
627
628         dev = get_qlnxr_dev(ibdev);
629         ha = dev->ha;
630
631         QL_DPRINT12(ha, "enter\n");
632
633         if (port > 1) {
634                 QL_DPRINT12(ha, "port [%d] > 1 \n", port);
635                 return -EINVAL;
636         }
637
638         if (dev->rdma_ctx == NULL) {
639                 QL_DPRINT12(ha, "rdma_ctx == NULL\n");
640                 return -EINVAL;
641         }
642
643         rdma_port = ecore_rdma_query_port(dev->rdma_ctx);
644         memset(attr, 0, sizeof *attr);
645
646         if (rdma_port->port_state == ECORE_RDMA_PORT_UP) {
647                 attr->state = IB_PORT_ACTIVE;
648                 attr->phys_state = 5;
649         } else {
650                 attr->state = IB_PORT_DOWN;
651                 attr->phys_state = 3;
652         }
653
654         attr->max_mtu = IB_MTU_4096;
655         attr->active_mtu = iboe_get_mtu(dev->ha->ifp->if_mtu);
656         attr->lid = 0;
657         attr->lmc = 0;
658         attr->sm_lid = 0;
659         attr->sm_sl = 0;
660         attr->port_cap_flags = 0;
661
662         if (QLNX_IS_IWARP(dev)) {
663                 attr->gid_tbl_len = 1;
664                 attr->pkey_tbl_len = 1;
665         } else {
666                 attr->gid_tbl_len = QLNXR_MAX_SGID;
667                 attr->pkey_tbl_len = QLNXR_ROCE_PKEY_TABLE_LEN;
668         }
669
670         attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
671         attr->qkey_viol_cntr = 0;
672
673         get_link_speed_and_width(rdma_port->link_speed,
674                                  &attr->active_speed, &attr->active_width);
675
676         attr->max_msg_sz = rdma_port->max_msg_size;
677         attr->max_vl_num = 4; /* TODO -> figure this one out... */
678
679         QL_DPRINT12(ha, "state = %d phys_state = %d "
680                 " link_speed = %d active_speed = %d active_width = %d"
681                 " attr->gid_tbl_len = %d attr->pkey_tbl_len = %d"
682                 " max_msg_sz = 0x%x max_vl_num = 0x%x \n",
683                 attr->state, attr->phys_state,
684                 rdma_port->link_speed, attr->active_speed,
685                 attr->active_width, attr->gid_tbl_len, attr->pkey_tbl_len,
686                 attr->max_msg_sz, attr->max_vl_num);
687
688         QL_DPRINT12(ha, "exit\n");
689         return 0;
690 }
691
692 int
693 qlnxr_modify_port(struct ib_device *ibdev, uint8_t port, int mask,
694         struct ib_port_modify *props)
695 {
696         struct qlnxr_dev        *dev;
697         qlnx_host_t             *ha;
698
699         dev = get_qlnxr_dev(ibdev);
700         ha = dev->ha;
701
702         QL_DPRINT12(ha, "enter\n");
703
704         if (port > 1) {
705                 QL_DPRINT12(ha, "port (%d) > 1\n", port);
706                 return -EINVAL;
707         }
708
709         QL_DPRINT12(ha, "exit\n");
710         return 0;
711 }
712
713 enum rdma_link_layer
714 qlnxr_link_layer(struct ib_device *ibdev, uint8_t port_num)
715 {
716         struct qlnxr_dev        *dev;
717         qlnx_host_t             *ha;
718
719         dev = get_qlnxr_dev(ibdev);
720         ha = dev->ha;
721
722         QL_DPRINT12(ha, "ibdev = %p port_num = 0x%x\n", ibdev, port_num);
723
724         return IB_LINK_LAYER_ETHERNET;
725 }
726
727 struct ib_pd *
728 qlnxr_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context,
729         struct ib_udata *udata)
730 {
731         struct qlnxr_pd         *pd = NULL;
732         u16                     pd_id;
733         int                     rc;
734         struct qlnxr_dev        *dev;
735         qlnx_host_t             *ha;
736
737         dev = get_qlnxr_dev(ibdev);
738         ha = dev->ha;
739
740         QL_DPRINT12(ha, "ibdev = %p context = %p"
741                 " udata = %p enter\n", ibdev, context, udata);
742
743         if (dev->rdma_ctx == NULL) {
744                 QL_DPRINT11(ha, "dev->rdma_ctx = NULL\n");
745                 rc = -1;
746                 goto err;
747         }
748
749         pd = kzalloc(sizeof(*pd), GFP_KERNEL);
750         if (!pd) {
751                 rc = -ENOMEM;
752                 QL_DPRINT11(ha, "kzalloc(pd) = NULL\n");
753                 goto err;
754         }
755
756         rc = ecore_rdma_alloc_pd(dev->rdma_ctx, &pd_id);
757         if (rc) {
758                 QL_DPRINT11(ha, "ecore_rdma_alloc_pd failed\n");
759                 goto err;
760         }
761
762         pd->pd_id = pd_id;
763
764         if (udata && context) {
765
766                 rc = ib_copy_to_udata(udata, &pd->pd_id, sizeof(pd->pd_id));
767                 if (rc) {
768                         QL_DPRINT11(ha, "ib_copy_to_udata failed\n");
769                         ecore_rdma_free_pd(dev->rdma_ctx, pd_id);
770                         goto err;
771                 }
772
773                 pd->uctx = get_qlnxr_ucontext(context);
774                 pd->uctx->pd = pd;
775         }
776
777         atomic_add_rel_32(&dev->pd_count, 1);
778         QL_DPRINT12(ha, "exit [pd, pd_id, pd_count] = [%p, 0x%x, %d]\n",
779                 pd, pd_id, dev->pd_count);
780
781         return &pd->ibpd;
782
783 err:
784         kfree(pd);
785         QL_DPRINT12(ha, "exit -1\n");
786         return ERR_PTR(rc);
787 }
788
789 int
790 qlnxr_dealloc_pd(struct ib_pd *ibpd)
791 {
792         struct qlnxr_pd         *pd;
793         struct qlnxr_dev        *dev;
794         qlnx_host_t             *ha;
795
796         pd = get_qlnxr_pd(ibpd);
797         dev = get_qlnxr_dev((ibpd->device));
798         ha = dev->ha;
799
800         QL_DPRINT12(ha, "enter\n");
801
802         if (pd == NULL) {
803                 QL_DPRINT11(ha, "pd = NULL\n");
804         } else {
805                 ecore_rdma_free_pd(dev->rdma_ctx, pd->pd_id);
806                 kfree(pd);
807                 atomic_subtract_rel_32(&dev->pd_count, 1);
808                 QL_DPRINT12(ha, "exit [pd, pd_id, pd_count] = [%p, 0x%x, %d]\n",
809                         pd, pd->pd_id, dev->pd_count);
810         }
811
812         QL_DPRINT12(ha, "exit\n");
813         return 0;
814 }
815
816 #define ROCE_WQE_ELEM_SIZE      sizeof(struct rdma_sq_sge)
817 #define RDMA_MAX_SGE_PER_SRQ    (4) /* Should be part of HSI */
818 /* Should be part of HSI */
819 #define RDMA_MAX_SRQ_WQE_SIZE   (RDMA_MAX_SGE_PER_SRQ + 1) /* +1 for header */
820 #define DB_ADDR_SHIFT(addr)             ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
821
822 static void qlnxr_cleanup_user(struct qlnxr_dev *, struct qlnxr_qp *);
823 static void qlnxr_cleanup_kernel(struct qlnxr_dev *, struct qlnxr_qp *);
824
825 int
826 qlnxr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
827 {
828         struct qlnxr_dev        *dev;
829         qlnx_host_t             *ha;
830
831         dev = get_qlnxr_dev(ibdev);
832         ha = dev->ha;
833
834         QL_DPRINT12(ha, "enter index = 0x%x\n", index);
835
836         if (index >= QLNXR_ROCE_PKEY_TABLE_LEN)
837                 return -EINVAL;
838
839         *pkey = QLNXR_ROCE_PKEY_DEFAULT;
840
841         QL_DPRINT12(ha, "exit\n");
842         return 0;
843 }
844
845
846 static inline bool
847 qlnxr_get_vlan_id_qp(qlnx_host_t *ha, struct ib_qp_attr *attr, int attr_mask,
848        u16 *vlan_id)
849 {
850         bool ret = false;
851
852         QL_DPRINT12(ha, "enter \n");
853
854         *vlan_id = 0;
855
856 #if __FreeBSD_version >= 1100000
857         u16 tmp_vlan_id;
858
859 #if __FreeBSD_version >= 1102000
860         union ib_gid *dgid;
861
862         dgid = &attr->ah_attr.grh.dgid;
863         tmp_vlan_id = (dgid->raw[11] << 8) | dgid->raw[12];
864
865         if (!(tmp_vlan_id & ~EVL_VLID_MASK)) {
866                 *vlan_id = tmp_vlan_id;
867                 ret = true;
868         }
869 #else
870         tmp_vlan_id = attr->vlan_id;
871
872         if ((attr_mask & IB_QP_VID) && (!(tmp_vlan_id & ~EVL_VLID_MASK))) {
873                 *vlan_id = tmp_vlan_id;
874                 ret = true;
875         }
876
877 #endif /* #if __FreeBSD_version >= 1102000 */
878
879 #else
880         ret = true;
881
882 #endif /* #if __FreeBSD_version >= 1100000 */
883
884         QL_DPRINT12(ha, "exit vlan_id = 0x%x ret = %d \n", *vlan_id, ret);
885
886         return (ret);
887 }
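/*
 * Example of the GID-based path above (hypothetical bytes, FreeBSD >= 1102000):
 * dgid->raw[11] = 0x00 and dgid->raw[12] = 0x64 give tmp_vlan_id = 0x0064
 * (VLAN 100); since 0x0064 & ~EVL_VLID_MASK == 0, *vlan_id is set to 100 and
 * the function returns true.
 */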
888
889 static inline void
890 get_gid_info(struct ib_qp *ibqp, struct ib_qp_attr *attr,
891         int attr_mask,
892         struct qlnxr_dev *dev,
893         struct qlnxr_qp *qp,
894         struct ecore_rdma_modify_qp_in_params *qp_params)
895 {
896         int             i;
897         qlnx_host_t     *ha;
898
899         ha = dev->ha;
900
901         QL_DPRINT12(ha, "enter\n");
902
903         memcpy(&qp_params->sgid.bytes[0],
904                &dev->sgid_tbl[qp->sgid_idx].raw[0],
905                sizeof(qp_params->sgid.bytes));
906         memcpy(&qp_params->dgid.bytes[0],
907                &attr->ah_attr.grh.dgid.raw[0],
908                sizeof(qp_params->dgid));
909
910         qlnxr_get_vlan_id_qp(ha, attr, attr_mask, &qp_params->vlan_id);
911
912         for (i = 0; i < (sizeof(qp_params->sgid.dwords)/sizeof(uint32_t)); i++) {
913                 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
914                 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
915         }
916
917         QL_DPRINT12(ha, "exit\n");
918         return;
919 }
920
921
922
923 static int
924 qlnxr_add_mmap(struct qlnxr_ucontext *uctx, u64 phy_addr, unsigned long len)
925 {
926         struct qlnxr_mm *mm;
927         qlnx_host_t     *ha;
928
929         ha = uctx->dev->ha;
930
931         QL_DPRINT12(ha, "enter\n");
932
933         mm = kzalloc(sizeof(*mm), GFP_KERNEL);
934         if (mm == NULL) {
935                 QL_DPRINT11(ha, "mm = NULL\n");
936                 return -ENOMEM;
937         }
938
939         mm->key.phy_addr = phy_addr;
940
941         /* This function might be called with a length which is not a multiple
942          * of PAGE_SIZE, while the mapping is PAGE_SIZE-grained and the kernel
943          * forces this granularity by increasing the requested size if needed.
944          * When qlnxr_mmap is called, it will search the list with the updated
945          * length as a key. To prevent search failures, the length is rounded up
946          * in advance to PAGE_SIZE.
947          */
948         mm->key.len = roundup(len, PAGE_SIZE);
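        /* For example (assuming 4 KB pages): len = 100 -> mm->key.len = 4096. */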
949         INIT_LIST_HEAD(&mm->entry);
950
951         mutex_lock(&uctx->mm_list_lock);
952         list_add(&mm->entry, &uctx->mm_head);
953         mutex_unlock(&uctx->mm_list_lock);
954
955         QL_DPRINT12(ha, "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
956                 (unsigned long long)mm->key.phy_addr,
957                 (unsigned long)mm->key.len, uctx);
958
959         return 0;
960 }
961
962 static bool
963 qlnxr_search_mmap(struct qlnxr_ucontext *uctx, u64 phy_addr, unsigned long len)
964 {
965         bool            found = false;
966         struct qlnxr_mm *mm;
967         qlnx_host_t     *ha;
968
969         ha = uctx->dev->ha;
970
971         QL_DPRINT12(ha, "enter\n");
972
973         mutex_lock(&uctx->mm_list_lock);
974         list_for_each_entry(mm, &uctx->mm_head, entry) {
975                 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
976                         continue;
977
978                 found = true;
979                 break;
980         }
981         mutex_unlock(&uctx->mm_list_lock);
982
983         QL_DPRINT12(ha,
984                 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, found=%d\n",
985                 (unsigned long long)phy_addr, (unsigned long)len, uctx, found);
986
987         return found;
988 }
989
990 struct
991 ib_ucontext *qlnxr_alloc_ucontext(struct ib_device *ibdev,
992                 struct ib_udata *udata)
993 {
994         int rc;
995         struct qlnxr_ucontext *ctx;
996         struct qlnxr_alloc_ucontext_resp uresp;
997         struct qlnxr_dev *dev = get_qlnxr_dev(ibdev);
998         qlnx_host_t *ha = dev->ha;
999         struct ecore_rdma_add_user_out_params oparams;
1000
1001         if (!udata) {
1002                 return ERR_PTR(-EFAULT);
1003         }
1004
1005         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1006         if (!ctx)
1007                 return ERR_PTR(-ENOMEM);
1008
1009         rc = ecore_rdma_add_user(dev->rdma_ctx, &oparams);
1010         if (rc) {
1011                 QL_DPRINT12(ha,
1012                         "Failed to allocate a DPI for a new RoCE application, "
1013                         "rc = %d. To overcome this, consider increasing "
1014                         "the number of DPIs, increasing the doorbell BAR size, "
1015                         "or closing unnecessary RoCE applications. To "
1016                         "increase the number of DPIs, consult the "
1017                         "README\n", rc);
1018                 goto err;
1019         }
1020
1021         ctx->dpi = oparams.dpi;
1022         ctx->dpi_addr = oparams.dpi_addr;
1023         ctx->dpi_phys_addr = oparams.dpi_phys_addr;
1024         ctx->dpi_size = oparams.dpi_size;
1025         INIT_LIST_HEAD(&ctx->mm_head);
1026         mutex_init(&ctx->mm_list_lock);
1027
1028         memset(&uresp, 0, sizeof(uresp));
1029         uresp.dpm_enabled = offsetof(struct qlnxr_alloc_ucontext_resp, dpm_enabled)
1030                                 < udata->outlen ? dev->user_dpm_enabled : 0; //TODO: figure this out
1031         uresp.wids_enabled = offsetof(struct qlnxr_alloc_ucontext_resp, wids_enabled)
1032                                 < udata->outlen ? 1 : 0; //TODO: figure this out
1033         uresp.wid_count = offsetof(struct qlnxr_alloc_ucontext_resp, wid_count)
1034                                 < udata->outlen ? oparams.wid_count : 0; //TODO: figure this out 
1035         uresp.db_pa = ctx->dpi_phys_addr;
1036         uresp.db_size = ctx->dpi_size;
1037         uresp.max_send_wr = dev->attr.max_sqe;
1038         uresp.max_recv_wr = dev->attr.max_rqe;
1039         uresp.max_srq_wr = dev->attr.max_srq_wr;
1040         uresp.sges_per_send_wr = QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
1041         uresp.sges_per_recv_wr = QLNXR_MAX_RQE_ELEMENTS_PER_RQE;
1042         uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
1043         uresp.max_cqes = QLNXR_MAX_CQES;
1044         
1045         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1046         if (rc)
1047                 goto err;
1048
1049         ctx->dev = dev;
1050
1051         rc = qlnxr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
1052         if (rc)
1053                 goto err;
1054         QL_DPRINT12(ha, "Allocated user context %p\n",
1055                 &ctx->ibucontext);
1056         
1057         return &ctx->ibucontext;
1058 err:
1059         kfree(ctx);
1060         return ERR_PTR(rc);
1061 }
1062
1063 int
1064 qlnxr_dealloc_ucontext(struct ib_ucontext *ibctx)
1065 {
1066         struct qlnxr_ucontext *uctx = get_qlnxr_ucontext(ibctx);
1067         struct qlnxr_dev *dev = uctx->dev;
1068         qlnx_host_t *ha = dev->ha;
1069         struct qlnxr_mm *mm, *tmp;
1070         int status = 0;
1071
1072         QL_DPRINT12(ha, "Deallocating user context %p\n",
1073                         uctx);
1074
1075         if (dev) {
1076                 ecore_rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
1077         }
1078
1079         list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
1080                 QL_DPRINT12(ha, "deleted addr= 0x%llx, len = 0x%lx for"
1081                                 " ctx=%p\n",
1082                                 mm->key.phy_addr, mm->key.len, uctx);
1083                 list_del(&mm->entry);
1084                 kfree(mm);
1085         }
1086         kfree(uctx);
1087         return status;
1088 }
1089
1090 int
1091 qlnxr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1092 {
1093         struct qlnxr_ucontext   *ucontext = get_qlnxr_ucontext(context);
1094         struct qlnxr_dev        *dev = get_qlnxr_dev((context->device));
1095         unsigned long           vm_page = vma->vm_pgoff << PAGE_SHIFT;
1096         u64                     unmapped_db;
1097         unsigned long           len = (vma->vm_end - vma->vm_start);
1098         int                     rc = 0;
1099         bool                    found;
1100         qlnx_host_t             *ha;
1101
1102         ha = dev->ha;
1103
1104 #if __FreeBSD_version > 1102000
1105         unmapped_db = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
1106 #else
1107         unmapped_db = dev->db_phys_addr;
1108 #endif /* #if __FreeBSD_version > 1102000 */
1109
1110         QL_DPRINT12(ha, "qlnxr_mmap enter vm_page=0x%lx"
1111                 " vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
1112                 vm_page, vma->vm_pgoff, unmapped_db,
1113                 dev->db_size, len);
1114
1115         if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
1116                 QL_DPRINT11(ha, "Vma_start not page aligned "
1117                         "vm_start = %ld vma_end = %ld\n", vma->vm_start,
1118                         vma->vm_end);
1119                 return -EINVAL;
1120         }
1121
1122         found = qlnxr_search_mmap(ucontext, vm_page, len);
1123         if (!found) {
1124                 QL_DPRINT11(ha, "Vma_pgoff not found in mapped array = %ld\n",
1125                         vma->vm_pgoff);
1126                 return -EINVAL;
1127         }
1128
1129         QL_DPRINT12(ha, "Mapping doorbell bar\n");
1130
1131 #if __FreeBSD_version > 1102000
1132
1133         if ((vm_page < unmapped_db) ||
1134                 ((vm_page + len) > (unmapped_db + ucontext->dpi_size))) {
1135                 QL_DPRINT11(ha, "failed pages are outside of dpi;"
1136                         "page address=0x%lx, unmapped_db=0x%lx, dpi_size=0x%x\n",
1137                         vm_page, unmapped_db, ucontext->dpi_size);
1138                 return -EINVAL;
1139         }
1140
1141         if (vma->vm_flags & VM_READ) {
1142                 QL_DPRINT11(ha, "failed mmap, cannot map doorbell bar for read\n");
1143                 return -EINVAL;
1144         }
1145
1146         vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1147         rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
1148                         vma->vm_page_prot);
1149
1150 #else
1151
1152         if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
1153                 dev->db_size))) {
1154
1155                 QL_DPRINT12(ha, "Mapping doorbell bar\n");
1156
1157                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1158
1159                 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1160                                             PAGE_SIZE, vma->vm_page_prot);
1161         } else {
1162                 QL_DPRINT12(ha, "Mapping chains\n");
1163                 rc = io_remap_pfn_range(vma, vma->vm_start,
1164                                          vma->vm_pgoff, len, vma->vm_page_prot);
1165         }
1166
1167 #endif /* #if __FreeBSD_version > 1102000 */
1168
1169         QL_DPRINT12(ha, "exit [%d]\n", rc);
1170         return rc;
1171 }
1172
1173 struct ib_mr *
1174 qlnxr_get_dma_mr(struct ib_pd *ibpd, int acc)
1175 {
1176         struct qlnxr_mr         *mr;
1177         struct qlnxr_dev        *dev = get_qlnxr_dev((ibpd->device));
1178         struct qlnxr_pd         *pd = get_qlnxr_pd(ibpd);
1179         int                     rc;
1180         qlnx_host_t             *ha;
1181
1182         ha = dev->ha;
1183
1184         QL_DPRINT12(ha, "enter\n");
1185
1186         if (acc & IB_ACCESS_MW_BIND) {
1187                 QL_DPRINT12(ha, "Unsupported access flags received for dma mr\n");
1188         }
1189
1190         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1191         if (!mr) {
1192                 rc = -ENOMEM;
1193                 QL_DPRINT12(ha, "kzalloc(mr) failed %d\n", rc);
1194                 goto err0;
1195         }
1196
1197         mr->type = QLNXR_MR_DMA;
1198
1199         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
1200         if (rc) {
1201                 QL_DPRINT12(ha, "ecore_rdma_alloc_tid failed %d\n", rc);
1202                 goto err1;
1203         }
1204
1205         /* index only, 18 bit long, lkey = itid << 8 | key */
1206         mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
1207         mr->hw_mr.pd = pd->pd_id;
1208         mr->hw_mr.local_read = 1;
1209         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
1210         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
1211         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1212         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
1213         mr->hw_mr.dma_mr = true;
1214
1215         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
1216         if (rc) {
1217                 QL_DPRINT12(ha, "ecore_rdma_register_tid failed %d\n", rc);
1218                 goto err2;
1219         }
1220
1221         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
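        /* e.g. (hypothetical values) itid = 0x1a2b, key = 0x07 -> lkey = 0x1a2b07 */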
1222
1223         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
1224                 mr->hw_mr.remote_atomic) {
1225                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1226         }
1227
1228         QL_DPRINT12(ha, "lkey = %x\n", mr->ibmr.lkey);
1229
1230         return &mr->ibmr;
1231
1232 err2:
1233         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
1234 err1:
1235         kfree(mr);
1236 err0:
1237         QL_DPRINT12(ha, "exit [%d]\n", rc);
1238
1239         return ERR_PTR(rc);
1240 }
1241
1242 static void
1243 qlnxr_free_pbl(struct qlnxr_dev *dev, struct qlnxr_pbl_info *pbl_info,
1244         struct qlnxr_pbl *pbl)
1245 {
1246         int             i;
1247         qlnx_host_t     *ha;
1248
1249         ha = dev->ha;
1250
1251         QL_DPRINT12(ha, "enter\n");
1252
1253         for (i = 0; i < pbl_info->num_pbls; i++) {
1254                 if (!pbl[i].va)
1255                         continue;
1256                 qlnx_dma_free_coherent(&dev->ha->cdev, pbl[i].va, pbl[i].pa,
1257                         pbl_info->pbl_size);
1258         }
1259         kfree(pbl);
1260
1261         QL_DPRINT12(ha, "exit\n");
1262         return;
1263 }
1264
1265 #define MIN_FW_PBL_PAGE_SIZE (4*1024)
1266 #define MAX_FW_PBL_PAGE_SIZE (64*1024)
1267
1268 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
1269 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
1270 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE*MAX_PBES_ON_PAGE)
1271
1272 static struct qlnxr_pbl *
1273 qlnxr_alloc_pbl_tbl(struct qlnxr_dev *dev,
1274         struct qlnxr_pbl_info *pbl_info, gfp_t flags)
1275 {
1276         void                    *va;
1277         dma_addr_t              pa;
1278         dma_addr_t              *pbl_main_tbl;
1279         struct qlnxr_pbl        *pbl_table;
1280         int                     i, rc = 0;
1281         qlnx_host_t             *ha;
1282
1283         ha = dev->ha;
1284
1285         QL_DPRINT12(ha, "enter\n");
1286
1287         pbl_table = kzalloc(sizeof(*pbl_table) * pbl_info->num_pbls, flags);
1288
1289         if (!pbl_table) {
1290                 QL_DPRINT12(ha, "pbl_table = NULL\n");
1291                 return NULL;
1292         }
1293
1294         for (i = 0; i < pbl_info->num_pbls; i++) {
1295                 va = qlnx_dma_alloc_coherent(&dev->ha->cdev, &pa, pbl_info->pbl_size);
1296                 if (!va) {
1297                         QL_DPRINT11(ha, "Failed to allocate pbl#%d\n", i);
1298                         rc = -ENOMEM;
1299                         goto err;
1300                 }
1301                 memset(va, 0, pbl_info->pbl_size);
1302                 pbl_table[i].va = va;
1303                 pbl_table[i].pa = pa;
1304         }
1305
1306         /* Two-layer PBLs: if we have more than one pbl, we need to initialize
1307          * the first one with physical pointers to all of the rest.
1308          */
1309         pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
1310         for (i = 0; i < pbl_info->num_pbls - 1; i++)
1311                 pbl_main_tbl[i] = pbl_table[i + 1].pa;
1312
1313         QL_DPRINT12(ha, "exit\n");
1314         return pbl_table;
1315
1316 err:
1317         qlnxr_free_pbl(dev, pbl_info, pbl_table);
1318
1319         QL_DPRINT12(ha, "exit with error\n");
1320         return NULL;
1321 }
1322
1323 static int
1324 qlnxr_prepare_pbl_tbl(struct qlnxr_dev *dev,
1325         struct qlnxr_pbl_info *pbl_info,
1326         u32 num_pbes,
1327         int two_layer_capable)
1328 {
1329         u32             pbl_capacity;
1330         u32             pbl_size;
1331         u32             num_pbls;
1332         qlnx_host_t     *ha;
1333
1334         ha = dev->ha;
1335
1336         QL_DPRINT12(ha, "enter\n");
1337
1338         if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
1339                 if (num_pbes > MAX_PBES_TWO_LAYER) {
1340                         QL_DPRINT11(ha, "prepare pbl table: too many pages %d\n",
1341                                 num_pbes);
1342                         return -EINVAL;
1343                 }
1344
1345                 /* calculate required pbl page size */
1346                 pbl_size = MIN_FW_PBL_PAGE_SIZE;
1347                 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
1348                         NUM_PBES_ON_PAGE(pbl_size);
1349
1350                 while (pbl_capacity < num_pbes) {
1351                         pbl_size *= 2;
1352                         pbl_capacity = pbl_size / sizeof(u64);
1353                         pbl_capacity = pbl_capacity * pbl_capacity;
1354                 }
1355
1356                 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
1357                 num_pbls++; /* One for the layer0 ( points to the pbls) */
1358                 pbl_info->two_layered = true;
1359         } else {
1360                 /* One layered PBL */
1361                 num_pbls = 1;
1362                 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE, \
1363                                 roundup_pow_of_two((num_pbes * sizeof(u64))));
1364                 pbl_info->two_layered = false;
1365         }
1366
1367         pbl_info->num_pbls = num_pbls;
1368         pbl_info->pbl_size = pbl_size;
1369         pbl_info->num_pbes = num_pbes;
1370
1371         QL_DPRINT12(ha, "prepare pbl table: num_pbes=%d, num_pbls=%d pbl_size=%d\n",
1372                 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
1373
1374         return 0;
1375 }
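/*
 * Worked example (hypothetical sizes): a registration needing
 * num_pbes = 1,000,000 page entries on a two-layer capable device.
 * At MIN_FW_PBL_PAGE_SIZE = 4 KB a page holds 512 PBEs, so a 4 KB
 * two-layer table covers 512 * 512 = 262,144 entries, which is not
 * enough; pbl_size doubles to 8 KB (capacity 1024 * 1024 = 1,048,576).
 * num_pbls = DIV_ROUND_UP(1,000,000, 1024) + 1 = 978 pages, the extra
 * page being the layer-0 table that points at the other 977.
 */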
1376
1377 #define upper_32_bits(x) ((uint32_t)((x) >> 32))
1378 #define lower_32_bits(x) ((uint32_t)(x))
1379
1380 static void
1381 qlnxr_populate_pbls(struct qlnxr_dev *dev, struct ib_umem *umem,
1382         struct qlnxr_pbl *pbl, struct qlnxr_pbl_info *pbl_info)
1383 {
1384         struct regpair          *pbe;
1385         struct qlnxr_pbl        *pbl_tbl;
1386         struct scatterlist      *sg;
1387         int                     shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
1388         qlnx_host_t             *ha;
1389
1390 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
1391         int                     i;
1392         struct                  ib_umem_chunk *chunk = NULL;
1393 #else
1394         int                     entry;
1395 #endif
1396
1397
1398         ha = dev->ha;
1399
1400         QL_DPRINT12(ha, "enter\n");
1401
1402         if (!pbl_info) {
1403                 QL_DPRINT11(ha, "PBL_INFO not initialized\n");
1404                 return;
1405         }
1406
1407         if (!pbl_info->num_pbes) {
1408                 QL_DPRINT11(ha, "pbl_info->num_pbes == 0\n");
1409                 return;
1410         }
1411
1412         /* If we have a two-layered pbl, the first pbl points to the rest
1413          * of the pbls, and the first entry lies in the second pbl of the table.
1414          */
1415         if (pbl_info->two_layered)
1416                 pbl_tbl = &pbl[1];
1417         else
1418                 pbl_tbl = pbl;
1419
1420         pbe = (struct regpair *)pbl_tbl->va;
1421         if (!pbe) {
1422                 QL_DPRINT12(ha, "pbe is NULL\n");
1423                 return;
1424         }
1425
1426         pbe_cnt = 0;
1427
1428         shift = ilog2(umem->page_size);
1429
1430 #ifndef DEFINE_IB_UMEM_WITH_CHUNK
1431
1432         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
1433
1434 #else
1435         list_for_each_entry(chunk, &umem->chunk_list, list) {
1436                 /* get all the dma regions from the chunk. */
1437                 for (i = 0; i < chunk->nmap; i++) {
1438                         sg = &chunk->page_list[i];
1439 #endif
1440                         pages = sg_dma_len(sg) >> shift;
1441                         for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
1442                                 /* store the page address in pbe */
1443                                 pbe->lo =
1444                                     cpu_to_le32(sg_dma_address(sg) +
1445                                                 (umem->page_size * pg_cnt));
1446                                 pbe->hi =
1447                                     cpu_to_le32(upper_32_bits
1448                                                 ((sg_dma_address(sg) +
1449                                                   umem->page_size * pg_cnt)));
1450
1451                                 QL_DPRINT12(ha,
1452                                         "Populate pbl table:"
1453                                         " pbe->addr=0x%x:0x%x "
1454                                         " pbe_cnt = %d total_num_pbes=%d"
1455                                         " pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
1456                                         total_num_pbes, pbe);
1457
1458                                 pbe_cnt ++;
1459                                 total_num_pbes ++;
1460                                 pbe++;
1461
1462                                 if (total_num_pbes == pbl_info->num_pbes)
1463                                         return;
1464
1465                                 /* if the given pbl is full of pbes,
1466                                  * move to the next pbl.
1467                                  */
1468                                 if (pbe_cnt ==
1469                                         (pbl_info->pbl_size / sizeof(u64))) {
1470                                         pbl_tbl++;
1471                                         pbe = (struct regpair *)pbl_tbl->va;
1472                                         pbe_cnt = 0;
1473                                 }
1474                         }
1475 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
1476                 }
1477 #endif
1478         }
1479         QL_DPRINT12(ha, "exit\n");
1480         return;
1481 }
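/*
 * Illustrative walk of the loop above (hypothetical values): with
 * umem->page_size = 4096, shift = ilog2(4096) = 12, so an SG entry with
 * sg_dma_len = 16384 contributes pages = 4 and produces four PBEs holding
 * sg_dma_address(sg) + 0, + 4096, + 8192 and + 12288 respectively.
 */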
1482
1483 static void
1484 free_mr_info(struct qlnxr_dev *dev, struct mr_info *info)
1485 {
1486         struct qlnxr_pbl *pbl, *tmp;
1487         qlnx_host_t             *ha;
1488
1489         ha = dev->ha;
1490
1491         QL_DPRINT12(ha, "enter\n");
1492
1493         if (info->pbl_table)
1494                 list_add_tail(&info->pbl_table->list_entry,
1495                               &info->free_pbl_list);
1496
1497         if (!list_empty(&info->inuse_pbl_list))
1498                 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
1499
1500         list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
1501                 list_del(&pbl->list_entry);
1502                 qlnxr_free_pbl(dev, &info->pbl_info, pbl);
1503         }
1504         QL_DPRINT12(ha, "exit\n");
1505
1506         return;
1507 }
1508
1509 static int
1510 qlnxr_init_mr_info(struct qlnxr_dev *dev, struct mr_info *info,
1511         size_t page_list_len, bool two_layered)
1512 {
1513         int                     rc;
1514         struct qlnxr_pbl        *tmp;
1515         qlnx_host_t             *ha;
1516
1517         ha = dev->ha;
1518
1519         QL_DPRINT12(ha, "enter\n");
1520
1521         INIT_LIST_HEAD(&info->free_pbl_list);
1522         INIT_LIST_HEAD(&info->inuse_pbl_list);
1523
1524         rc = qlnxr_prepare_pbl_tbl(dev, &info->pbl_info,
1525                                   page_list_len, two_layered);
1526         if (rc) {
1527                 QL_DPRINT11(ha, "qlnxr_prepare_pbl_tbl [%d]\n", rc);
1528                 goto done;
1529         }
1530
1531         info->pbl_table = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
1532
1533         if (!info->pbl_table) {
1534                 rc = -ENOMEM;
1535                 QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl returned NULL\n");
1536                 goto done;
1537         }
1538
1539         QL_DPRINT12(ha, "pbl_table_pa = %pa\n", &info->pbl_table->pa);
1540
1541         /* In the usual case we use 2 PBLs, so we add one to the free
1542          * list and allocate another one.
1543          */
1544         tmp = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
1545
1546         if (!tmp) {
1547                 QL_DPRINT11(ha, "Extra PBL is not allocated\n");
1548                 goto done; /* it's OK if second allocation fails, so rc = 0*/
1549         }
1550
1551         list_add_tail(&tmp->list_entry, &info->free_pbl_list);
1552
1553         QL_DPRINT12(ha, "extra pbl_table_pa = %pa\n", &tmp->pa);
1554
1555 done:
1556         if (rc)
1557                 free_mr_info(dev, info);
1558
1559         QL_DPRINT12(ha, "exit [%d]\n", rc);
1560
1561         return rc;
1562 }
1563
1564
1565 struct ib_mr *
1566 #if __FreeBSD_version >= 1102000
1567 qlnxr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
1568         u64 usr_addr, int acc, struct ib_udata *udata)
1569 #else
1570 qlnxr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
1571         u64 usr_addr, int acc, struct ib_udata *udata, int mr_id)
1572 #endif /* #if __FreeBSD_version >= 1102000 */
1573 {
1574         int             rc = -ENOMEM;
1575         struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
1576         struct qlnxr_mr *mr;
1577         struct qlnxr_pd *pd;
1578         qlnx_host_t     *ha;
1579
1580         ha = dev->ha;
1581
1582         QL_DPRINT12(ha, "enter\n");
1583
1584         pd = get_qlnxr_pd(ibpd);
1585
1586         QL_DPRINT12(ha, "register user mr pd = %d"
1587                 " start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
1588                 pd->pd_id, start, len, usr_addr, acc);
1589
1590         if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
1591                 QL_DPRINT11(ha,
1592                         "(acc & IB_ACCESS_REMOTE_WRITE &&"
1593                         " !(acc & IB_ACCESS_LOCAL_WRITE))\n");
1594                 return ERR_PTR(-EINVAL);
1595         }
1596
1597         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1598         if (!mr) {
1599                 QL_DPRINT11(ha, "kzalloc(mr) failed\n");
1600                 return ERR_PTR(rc);
1601         }
1602
1603         mr->type = QLNXR_MR_USER;
1604
1605         mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
1606         if (IS_ERR(mr->umem)) {
1607                 rc = -EFAULT;
1608                 QL_DPRINT11(ha, "ib_umem_get failed [%p]\n", mr->umem);
1609                 goto err0;
1610         }
1611
1612         rc = qlnxr_init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
1613         if (rc) {
1614                 QL_DPRINT11(ha,
1615                         "qlnxr_init_mr_info failed [%d]\n", rc);
1616                 goto err1;
1617         }
1618
1619         qlnxr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
1620                            &mr->info.pbl_info);
1621
1622         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
1623
1624         if (rc) {
1625                 QL_DPRINT11(ha, "roce alloc tid returned an error %d\n", rc);
1626                 goto err1;
1627         }
1628
1629         /* index only, 18 bit long, lkey = itid << 8 | key */
1630         mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
1631         mr->hw_mr.key = 0;
1632         mr->hw_mr.pd = pd->pd_id;
1633         mr->hw_mr.local_read = 1;
1634         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
1635         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
1636         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1637         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
1638         mr->hw_mr.mw_bind = false; /* TBD MW BIND */
1639         mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
1640         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
1641         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
1642         mr->hw_mr.page_size_log = ilog2(mr->umem->page_size); /* for the MR pages */
1643
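        /* fbo ("first byte offset") is the offset of the start of the MR
         * within its first page, taken from the umem below.
         */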
1644 #if __FreeBSD_version >= 1102000
1645         mr->hw_mr.fbo = ib_umem_offset(mr->umem);
1646 #else
1647         mr->hw_mr.fbo = mr->umem->offset;
1648 #endif
1649         mr->hw_mr.length = len;
1650         mr->hw_mr.vaddr = usr_addr;
1651         mr->hw_mr.zbva = false; /* TBD figure when this should be true */
1652         mr->hw_mr.phy_mr = false; /* Fast MR - True, Regular Register False */
1653         mr->hw_mr.dma_mr = false;
1654
1655         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
1656         if (rc) {
1657                 QL_DPRINT11(ha, "roce register tid returned an error %d\n", rc);
1658                 goto err2;
1659         }
1660
1661         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1662         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
1663                 mr->hw_mr.remote_atomic)
1664                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
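        /* Illustrative layout only (example values, not taken from HW):
         * with itid = 0x12345 and key = 0 the resulting lkey/rkey is
         * (0x12345 << 8) | 0 = 0x01234500; the low byte carries the key
         * and the upper bits carry the HW tid index.
         */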
1665
1666         QL_DPRINT12(ha, "register user mr lkey: %x\n", mr->ibmr.lkey);
1667
1668         return (&mr->ibmr);
1669
1670 err2:
1671         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
1672 err1:
1673         qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
1674 err0:
1675         kfree(mr);
1676
1677         QL_DPRINT12(ha, "exit [%d]\n", rc);
1678         return (ERR_PTR(rc));
1679 }
1680
1681 int
1682 qlnxr_dereg_mr(struct ib_mr *ib_mr)
1683 {
1684         struct qlnxr_mr *mr = get_qlnxr_mr(ib_mr);
1685         struct qlnxr_dev *dev = get_qlnxr_dev((ib_mr->device));
1686         int             rc = 0;
1687         qlnx_host_t     *ha;
1688
1689         ha = dev->ha;
1690
1691         QL_DPRINT12(ha, "enter\n");
1692
1693         if ((mr->type != QLNXR_MR_DMA) && (mr->type != QLNXR_MR_FRMR))
1694                 qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
1695
1696         /* it could be user registered memory. */
1697         if (mr->umem)
1698                 ib_umem_release(mr->umem);
1699
1700         kfree(mr->pages);
1701
1702         kfree(mr);
1703
1704         QL_DPRINT12(ha, "exit\n");
1705         return rc;
1706 }
1707
1708 static int
1709 qlnxr_copy_cq_uresp(struct qlnxr_dev *dev,
1710         struct qlnxr_cq *cq, struct ib_udata *udata)
1711 {
1712         struct qlnxr_create_cq_uresp    uresp;
1713         int                             rc;
1714         qlnx_host_t                     *ha;
1715
1716         ha = dev->ha;
1717
1718         QL_DPRINT12(ha, "enter\n");
1719
1720         memset(&uresp, 0, sizeof(uresp));
1721
1722         uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
1723         uresp.icid = cq->icid;
1724
1725         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1726
1727         if (rc) {
1728                 QL_DPRINT12(ha, "ib_copy_to_udata error cqid=0x%x[%d]\n",
1729                         cq->icid, rc);
1730         }
1731
1732         QL_DPRINT12(ha, "exit [%d]\n", rc);
1733         return rc;
1734 }
1735
1736 static void
1737 consume_cqe(struct qlnxr_cq *cq)
1738 {
1739
1740         if (cq->latest_cqe == cq->toggle_cqe)
1741                 cq->pbl_toggle ^= RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK;
1742
1743         cq->latest_cqe = ecore_chain_consume(&cq->pbl);
1744 }
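/* Wrap detection for the consumer above: toggle_cqe is set to the very last
 * element of the chain, so every time the consumer passes it the expected
 * toggle bit flips and CQEs left over from the previous lap are no longer
 * treated as valid (see the kernel CQ setup in qlnxr_create_cq below).
 */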
1745
1746 static inline int
1747 qlnxr_align_cq_entries(int entries)
1748 {
1749         u64 size, aligned_size;
1750
1751         /* We allocate an extra entry that we don't report to the FW.
1752          * Why?
1753          * The CQE size is 32 bytes but the FW writes in chunks of 64 bytes
1754          * (for performance purposes). Allocating an extra entry and telling
1755          * the FW we have less prevents overwriting the first entry in case of
1756          * a wrap i.e. when the FW writes the last entry and the application
1757          * hasn't read the first one.
1758          */
1759         size = (entries + 1) * QLNXR_CQE_SIZE;
1760
1761         /* We align to PAGE_SIZE.
1762          * Why?
1763          * Since the CQ is going to be mapped and the mapping is anyhow in whole
1764          * kernel pages we benefit from the possibly extra CQEs.
1765          */
1766         aligned_size = ALIGN(size, PAGE_SIZE);
1767
1768         /* note: for CQs created in user space the result of this function
1769          * should match the size mapped in user space
1770          */
1771         return (aligned_size / QLNXR_CQE_SIZE);
1772 }
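/* Worked example of the sizing above, assuming QLNXR_CQE_SIZE == 32 and
 * PAGE_SIZE == 4096 (illustrative values only): a request for 100 entries
 * becomes (100 + 1) * 32 = 3232 bytes, ALIGN() rounds that up to a single
 * 4096-byte page, and the function reports 4096 / 32 = 128 usable entries.
 */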
1773
1774 static inline int
1775 qlnxr_init_user_queue(struct ib_ucontext *ib_ctx, struct qlnxr_dev *dev,
1776         struct qlnxr_userq *q, u64 buf_addr, size_t buf_len,
1777         int access, int dmasync, int alloc_and_init)
1778 {
1779         int             page_cnt;
1780         int             rc;
1781         qlnx_host_t     *ha;
1782
1783         ha = dev->ha;
1784
1785         QL_DPRINT12(ha, "enter\n");
1786
1787         q->buf_addr = buf_addr;
1788         q->buf_len = buf_len;
1789
1790         QL_DPRINT12(ha, "buf_addr : %llx, buf_len : %x, access : %x"
1791               " dmasync : %x\n", q->buf_addr, q->buf_len,
1792                 access, dmasync);       
1793
1794         q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
1795
1796         if (IS_ERR(q->umem)) {
1797                 QL_DPRINT11(ha, "ib_umem_get failed [%lx]\n", PTR_ERR(q->umem));
1798                 return PTR_ERR(q->umem);
1799         }
1800
1801         page_cnt = ib_umem_page_count(q->umem);
1802         rc = qlnxr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt,
1803                                   0 /* SQ and RQ don't support dual layer pbl.
1804                                      * CQ may, but this is not yet implemented.
1805                                      */);
1806         if (rc) {
1807                 QL_DPRINT11(ha, "qlnxr_prepare_pbl_tbl failed [%d]\n", rc);
1808                 goto err;
1809         }
1810
1811         if (alloc_and_init) {
1812                 q->pbl_tbl = qlnxr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
1813
1814                 if (!q->pbl_tbl) {
1815                         QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl failed\n");
1816                         rc = -ENOMEM;
1817                         goto err;
1818                 }
1819
1820                 qlnxr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
1821         } else {
1822                 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
1823
1824                 if (!q->pbl_tbl) {
1825                         QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl failed\n");
1826                         rc = -ENOMEM;
1827                         goto err;
1828                 }
1829         }
1830
1831         QL_DPRINT12(ha, "exit\n");
1832         return 0;
1833
1834 err:
1835         ib_umem_release(q->umem);
1836         q->umem = NULL;
1837
1838         QL_DPRINT12(ha, "exit [%d]\n", rc);
1839         return rc;
1840 }
1841
1842 #if __FreeBSD_version >= 1102000
1843
1844 struct ib_cq *
1845 qlnxr_create_cq(struct ib_device *ibdev,
1846         const struct ib_cq_init_attr *attr,
1847         struct ib_ucontext *ib_ctx,
1848         struct ib_udata *udata)
1849
1850 #else 
1851
1852 #if __FreeBSD_version >= 1100000
1853
1854 struct ib_cq *
1855 qlnxr_create_cq(struct ib_device *ibdev,
1856         struct ib_cq_init_attr *attr,
1857         struct ib_ucontext *ib_ctx,
1858         struct ib_udata *udata)
1859
1860 #else
1861
1862 struct ib_cq *
1863 qlnxr_create_cq(struct ib_device *ibdev,
1864         int entries,
1865         int vector,
1866         struct ib_ucontext *ib_ctx,
1867         struct ib_udata *udata)
1868 #endif /* #if __FreeBSD_version >= 1100000 */
1869
1870 #endif /* #if __FreeBSD_version >= 1102000 */
1871 {
1872         struct qlnxr_ucontext                   *ctx;
1873         struct ecore_rdma_destroy_cq_out_params destroy_oparams;
1874         struct ecore_rdma_destroy_cq_in_params  destroy_iparams;
1875         struct qlnxr_dev                        *dev;
1876         struct ecore_rdma_create_cq_in_params   params;
1877         struct qlnxr_create_cq_ureq             ureq;
1878
1879 #if __FreeBSD_version >= 1100000
1880         int                                     vector = attr->comp_vector;
1881         int                                     entries = attr->cqe;
1882 #endif
1883         struct qlnxr_cq                         *cq;
1884         int                                     chain_entries, rc, page_cnt;
1885         u64                                     pbl_ptr;
1886         u16                                     icid;
1887         qlnx_host_t                             *ha;
1888
1889         dev = get_qlnxr_dev(ibdev);
1890         ha = dev->ha;
1891
1892         QL_DPRINT12(ha, "called from %s. entries = %d, "
1893                 "vector = %d\n",
1894                 (udata ? "User Lib" : "Kernel"), entries, vector);
1895
1896         memset(&params, 0, sizeof(struct ecore_rdma_create_cq_in_params));
1897         memset(&destroy_iparams, 0, sizeof(struct ecore_rdma_destroy_cq_in_params));
1898         memset(&destroy_oparams, 0, sizeof(struct ecore_rdma_destroy_cq_out_params));
1899
1900         if (entries > QLNXR_MAX_CQES) {
1901                 QL_DPRINT11(ha,
1902                         "the number of entries %d is too high. "
1903                         "Must be equal or below %d.\n",
1904                         entries, QLNXR_MAX_CQES);
1905                 return ERR_PTR(-EINVAL);
1906         }
1907         chain_entries = qlnxr_align_cq_entries(entries);
1908         chain_entries = min_t(int, chain_entries, QLNXR_MAX_CQES);
1909
1910         cq = qlnx_zalloc((sizeof(struct qlnxr_cq)));
1911
1912         if (!cq)
1913                 return ERR_PTR(-ENOMEM);
1914
1915         if (udata) {
1916                 memset(&ureq, 0, sizeof(ureq));
1917
1918                 if (ib_copy_from_udata(&ureq, udata,
1919                         min(sizeof(ureq), udata->inlen))) {
1920                         QL_DPRINT11(ha, "ib_copy_from_udata failed\n");
1921                         goto err0;
1922                 }
1923
1924                 if (!ureq.len) {
1925                         QL_DPRINT11(ha, "ureq.len == 0\n");
1926                         goto err0;
1927                 }
1928
1929                 cq->cq_type = QLNXR_CQ_TYPE_USER;
1930
1931                 rc = qlnxr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
1932                                      ureq.len, IB_ACCESS_LOCAL_WRITE, 1, 1);
                if (rc)
                        goto err0;
1933
1934                 pbl_ptr = cq->q.pbl_tbl->pa;
1935                 page_cnt = cq->q.pbl_info.num_pbes;
1936                 cq->ibcq.cqe = chain_entries;
1937         } else {
1938                 cq->cq_type = QLNXR_CQ_TYPE_KERNEL;
1939
1940                 rc = ecore_chain_alloc(&dev->ha->cdev,
1941                            ECORE_CHAIN_USE_TO_CONSUME,
1942                            ECORE_CHAIN_MODE_PBL,
1943                            ECORE_CHAIN_CNT_TYPE_U32,
1944                            chain_entries,
1945                            sizeof(union roce_cqe),
1946                            &cq->pbl, NULL);
1947
1948                 if (rc)
1949                         goto err1;
1950
1951                 page_cnt = ecore_chain_get_page_cnt(&cq->pbl);
1952                 pbl_ptr = ecore_chain_get_pbl_phys(&cq->pbl);
1953                 cq->ibcq.cqe = cq->pbl.capacity;
1954         }
1955
1956         params.cq_handle_hi = upper_32_bits((uintptr_t)cq);
1957         params.cq_handle_lo = lower_32_bits((uintptr_t)cq);
1958         params.cnq_id = vector;
1959         params.cq_size = chain_entries - 1;
1960         params.pbl_num_pages = page_cnt;
1961         params.pbl_ptr = pbl_ptr;
1962         params.pbl_two_level = 0;
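        /* cq_handle_hi/lo above carry the driver's cq pointer split into two
         * 32-bit halves so that completion and async events can be mapped
         * back to this qlnxr_cq, mirroring the qp_handle_async_hi/lo scheme
         * used for QPs further below.
         */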
1963
1964         if (ib_ctx != NULL) {
1965                 ctx = get_qlnxr_ucontext(ib_ctx);
1966                 params.dpi = ctx->dpi;
1967         } else {
1968                 params.dpi = dev->dpi;
1969         }
1970
1971         rc = ecore_rdma_create_cq(dev->rdma_ctx, &params, &icid);
1972         if (rc)
1973                 goto err2;
1974
1975         cq->icid = icid;
1976         cq->sig = QLNXR_CQ_MAGIC_NUMBER;
1977         spin_lock_init(&cq->cq_lock);
1978
1979         if (ib_ctx) {
1980                 rc = qlnxr_copy_cq_uresp(dev, cq, udata);
1981                 if (rc)
1982                         goto err3;
1983         } else {
1984                 /* Generate doorbell address.
1985                  * Configure bits 3-9 with DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT.
1986                  * TODO: consider moving to device scope as it is a function of
1987                  *       the device.
1988                  * TODO: add an ifdef if 16-bit support is planned.
1989                  */
1990                 cq->db_addr = dev->db_addr +
1991                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
1992                 cq->db.data.icid = cq->icid;
1993                 cq->db.data.params = DB_AGG_CMD_SET <<
1994                                      RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1995
1996                 /* point to the very last element; once we pass it we will toggle */
1997                 cq->toggle_cqe = ecore_chain_get_last_elem(&cq->pbl);
1998                 cq->pbl_toggle = RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK;
1999
2000                 /* must be different from pbl_toggle */
2001                 cq->latest_cqe = NULL;
2002                 consume_cqe(cq);
2003                 cq->cq_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
2004         }
2005
2006         QL_DPRINT12(ha, "exit icid = 0x%0x, addr = %p,"
2007                 " number of entries = 0x%x\n",
2008                 cq->icid, cq, params.cq_size);
2009         QL_DPRINT12(ha,"cq_addr = %p\n", cq);
2010         return &cq->ibcq;
2011
2012 err3:
2013         destroy_iparams.icid = cq->icid;
2014         ecore_rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams, &destroy_oparams);
2015 err2:
2016         if (udata)
2017                 qlnxr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
2018         else
2019                 ecore_chain_free(&dev->ha->cdev, &cq->pbl);
2020 err1:
2021         if (udata)
2022                 ib_umem_release(cq->q.umem);
2023 err0:
2024         kfree(cq);
2025
2026         QL_DPRINT12(ha, "exit error\n");
2027
2028         return ERR_PTR(-EINVAL);
2029 }
2030
2031 int qlnxr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
2032 {
2033         int                     status = 0;
2034         struct qlnxr_dev        *dev = get_qlnxr_dev((ibcq->device));
2035         qlnx_host_t             *ha;
2036
2037         ha = dev->ha;
2038
2039         QL_DPRINT12(ha, "enter/exit\n");
2040
2041         return status;
2042 }
2043
2044 int
2045 qlnxr_destroy_cq(struct ib_cq *ibcq)
2046 {
2047         struct qlnxr_dev                        *dev = get_qlnxr_dev((ibcq->device));
2048         struct ecore_rdma_destroy_cq_out_params oparams;
2049         struct ecore_rdma_destroy_cq_in_params  iparams;
2050         struct qlnxr_cq                         *cq = get_qlnxr_cq(ibcq);
2051         int                                     rc = 0;
2052         qlnx_host_t                             *ha;
2053
2054         ha = dev->ha;
2055
2056         QL_DPRINT12(ha, "enter cq_id = %d\n", cq->icid);
2057
2058         cq->destroyed = 1;
2059
2060         /* TODO: Synchronize the irq of the CNQ the CQ belongs to, to validate
2061          * that all completions with notification have been dealt with. The rest
2062          * of the completions are not interesting.
2063          */
2064
2065         /* GSIs CQs are handled by driver, so they don't exist in the FW */
2066
2067         if (cq->cq_type != QLNXR_CQ_TYPE_GSI) {
2068
2069                 iparams.icid = cq->icid;
2070
2071                 rc = ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
2072
2073                 if (rc) {
2074                         QL_DPRINT12(ha, "ecore_rdma_destroy_cq failed cq_id = %d\n",
2075                                 cq->icid);
2076                         return rc;
2077                 }
2078
2079                 QL_DPRINT12(ha, "free cq->pbl cq_id = %d\n", cq->icid);
2080                 ecore_chain_free(&dev->ha->cdev, &cq->pbl);
2081         }
2082
2083         if (ibcq->uobject && ibcq->uobject->context) {
2084                 qlnxr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
2085                 ib_umem_release(cq->q.umem);
2086         }
2087
2088         cq->sig = ~cq->sig;
2089
2090         kfree(cq);
2091
2092         QL_DPRINT12(ha, "exit cq_id = %d\n", cq->icid);
2093
2094         return rc;
2095 }
2096
2097 static int
2098 qlnxr_check_qp_attrs(struct ib_pd *ibpd,
2099         struct qlnxr_dev *dev,
2100         struct ib_qp_init_attr *attrs,
2101         struct ib_udata *udata)
2102 {
2103         struct ecore_rdma_device        *qattr;
2104         qlnx_host_t                     *ha;
2105
2106         qattr = ecore_rdma_query_device(dev->rdma_ctx);
2107         ha = dev->ha;
2108
2109         QL_DPRINT12(ha, "enter\n");
2110
2111         QL_DPRINT12(ha, "attrs->sq_sig_type = %d\n", attrs->sq_sig_type);
2112         QL_DPRINT12(ha, "attrs->qp_type = %d\n", attrs->qp_type);
2113         QL_DPRINT12(ha, "attrs->create_flags = %d\n", attrs->create_flags);
2114
2115 #if __FreeBSD_version < 1102000
2116         QL_DPRINT12(ha, "attrs->qpg_type = %d\n", attrs->qpg_type);
2117 #endif
2118
2119         QL_DPRINT12(ha, "attrs->port_num = %d\n", attrs->port_num);
2120         QL_DPRINT12(ha, "attrs->cap.max_send_wr = 0x%x\n", attrs->cap.max_send_wr);
2121         QL_DPRINT12(ha, "attrs->cap.max_recv_wr = 0x%x\n", attrs->cap.max_recv_wr);
2122         QL_DPRINT12(ha, "attrs->cap.max_send_sge = 0x%x\n", attrs->cap.max_send_sge);
2123         QL_DPRINT12(ha, "attrs->cap.max_recv_sge = 0x%x\n", attrs->cap.max_recv_sge);
2124         QL_DPRINT12(ha, "attrs->cap.max_inline_data = 0x%x\n",
2125                 attrs->cap.max_inline_data);
2126
2127 #if __FreeBSD_version < 1102000
2128         QL_DPRINT12(ha, "attrs->cap.qpg_tss_mask_sz = 0x%x\n",
2129                 attrs->cap.qpg_tss_mask_sz);
2130 #endif
2131
2132         QL_DPRINT12(ha, "\n\nqattr->vendor_id = 0x%x\n", qattr->vendor_id);
2133         QL_DPRINT12(ha, "qattr->vendor_part_id = 0x%x\n", qattr->vendor_part_id);
2134         QL_DPRINT12(ha, "qattr->hw_ver = 0x%x\n", qattr->hw_ver);
2135         QL_DPRINT12(ha, "qattr->fw_ver = %p\n", (void *)qattr->fw_ver);
2136         QL_DPRINT12(ha, "qattr->node_guid = %p\n", (void *)qattr->node_guid);
2137         QL_DPRINT12(ha, "qattr->sys_image_guid = %p\n",
2138                 (void *)qattr->sys_image_guid);
2139         QL_DPRINT12(ha, "qattr->max_cnq = 0x%x\n", qattr->max_cnq);
2140         QL_DPRINT12(ha, "qattr->max_sge = 0x%x\n", qattr->max_sge);
2141         QL_DPRINT12(ha, "qattr->max_srq_sge = 0x%x\n", qattr->max_srq_sge);
2142         QL_DPRINT12(ha, "qattr->max_inline = 0x%x\n", qattr->max_inline);
2143         QL_DPRINT12(ha, "qattr->max_wqe = 0x%x\n", qattr->max_wqe);
2144         QL_DPRINT12(ha, "qattr->max_srq_wqe = 0x%x\n", qattr->max_srq_wqe);
2145         QL_DPRINT12(ha, "qattr->max_qp_resp_rd_atomic_resc = 0x%x\n",
2146                 qattr->max_qp_resp_rd_atomic_resc);
2147         QL_DPRINT12(ha, "qattr->max_qp_req_rd_atomic_resc = 0x%x\n",
2148                 qattr->max_qp_req_rd_atomic_resc);
2149         QL_DPRINT12(ha, "qattr->max_dev_resp_rd_atomic_resc = 0x%x\n",
2150                 qattr->max_dev_resp_rd_atomic_resc);
2151         QL_DPRINT12(ha, "qattr->max_cq = 0x%x\n", qattr->max_cq);
2152         QL_DPRINT12(ha, "qattr->max_qp = 0x%x\n", qattr->max_qp);
2153         QL_DPRINT12(ha, "qattr->max_srq = 0x%x\n", qattr->max_srq);
2154         QL_DPRINT12(ha, "qattr->max_mr = 0x%x\n", qattr->max_mr);
2155         QL_DPRINT12(ha, "qattr->max_mr_size = %p\n", (void *)qattr->max_mr_size);
2156         QL_DPRINT12(ha, "qattr->max_cqe = 0x%x\n", qattr->max_cqe);
2157         QL_DPRINT12(ha, "qattr->max_mw = 0x%x\n", qattr->max_mw);
2158         QL_DPRINT12(ha, "qattr->max_fmr = 0x%x\n", qattr->max_fmr);
2159         QL_DPRINT12(ha, "qattr->max_mr_mw_fmr_pbl = 0x%x\n",
2160                 qattr->max_mr_mw_fmr_pbl);
2161         QL_DPRINT12(ha, "qattr->max_mr_mw_fmr_size = %p\n",
2162                 (void *)qattr->max_mr_mw_fmr_size);
2163         QL_DPRINT12(ha, "qattr->max_pd = 0x%x\n", qattr->max_pd);
2164         QL_DPRINT12(ha, "qattr->max_ah = 0x%x\n", qattr->max_ah);
2165         QL_DPRINT12(ha, "qattr->max_pkey = 0x%x\n", qattr->max_pkey);
2166         QL_DPRINT12(ha, "qattr->max_srq_wr = 0x%x\n", qattr->max_srq_wr);
2167         QL_DPRINT12(ha, "qattr->max_stats_queues = 0x%x\n",
2168                 qattr->max_stats_queues);
2169         //QL_DPRINT12(ha, "qattr->dev_caps = 0x%x\n", qattr->dev_caps);
2170         QL_DPRINT12(ha, "qattr->page_size_caps = %p\n",
2171                 (void *)qattr->page_size_caps);
2172         QL_DPRINT12(ha, "qattr->dev_ack_delay = 0x%x\n", qattr->dev_ack_delay);
2173         QL_DPRINT12(ha, "qattr->reserved_lkey = 0x%x\n", qattr->reserved_lkey);
2174         QL_DPRINT12(ha, "qattr->bad_pkey_counter = 0x%x\n",
2175                 qattr->bad_pkey_counter);
2176
2177         if ((attrs->qp_type == IB_QPT_GSI) && udata) {
2178                 QL_DPRINT12(ha, "unexpected udata when creating GSI QP\n");
2179                 return -EINVAL;
2180         }
2181
2182         if (udata && !(ibpd->uobject && ibpd->uobject->context)) {
2183                 QL_DPRINT12(ha, "called from user without context\n");
2184                 return -EINVAL;
2185         }
2186
2187         /* QP0... attrs->qp_type == IB_QPT_GSI */
2188         if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
2189                 QL_DPRINT12(ha, "unsupported qp type=0x%x requested\n", 
2190                            attrs->qp_type);
2191                 return -EINVAL;
2192         }
2193         if (attrs->qp_type == IB_QPT_GSI && attrs->srq) {
2194                 QL_DPRINT12(ha, "cannot create GSI qp with SRQ\n");
2195                 return -EINVAL;
2196         }
2197         /* Skip the check for QP1 to support CM size of 128 */
2198         if (attrs->cap.max_send_wr > qattr->max_wqe) {
2199                 QL_DPRINT12(ha, "cannot create a SQ with %d elements "
2200                         " (max_send_wr=0x%x)\n",
2201                         attrs->cap.max_send_wr, qattr->max_wqe);
2202                 return -EINVAL;
2203         }
2204         if (!attrs->srq && (attrs->cap.max_recv_wr > qattr->max_wqe)) {
2205                 QL_DPRINT12(ha, "cannot create a RQ with %d elements"
2206                         " (max_recv_wr=0x%x)\n",
2207                         attrs->cap.max_recv_wr, qattr->max_wqe);
2208                 return -EINVAL;
2209         }
2210         if (attrs->cap.max_inline_data > qattr->max_inline) {
2211                 QL_DPRINT12(ha,
2212                         "unsupported inline data size=0x%x "
2213                         "requested (max_inline=0x%x)\n",
2214                         attrs->cap.max_inline_data, qattr->max_inline);
2215                 return -EINVAL;
2216         }
2217         if (attrs->cap.max_send_sge > qattr->max_sge) {
2218                 QL_DPRINT12(ha,
2219                         "unsupported send_sge=0x%x "
2220                         "requested (max_send_sge=0x%x)\n",
2221                         attrs->cap.max_send_sge, qattr->max_sge);
2222                 return -EINVAL;
2223         }
2224         if (attrs->cap.max_recv_sge > qattr->max_sge) {
2225                 QL_DPRINT12(ha,
2226                         "unsupported recv_sge=0x%x requested "
2227                         " (max_recv_sge=0x%x)\n",
2228                         attrs->cap.max_recv_sge, qattr->max_sge);
2229                 return -EINVAL;
2230         }
2231         /* unprivileged user space cannot create special QP */
2232         if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
2233                 QL_DPRINT12(ha,
2234                         "userspace can't create special QPs of type=0x%x\n",
2235                         attrs->qp_type);
2236                 return -EINVAL;
2237         }
2238         /* allow creating only one GSI type of QP */
2239         if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
2240                 QL_DPRINT12(ha,
2241                         "create qp: GSI special QPs already created.\n");
2242                 return -EINVAL;
2243         }
2244
2245         /* verify consumer QPs are not trying to use GSI QP's CQ */
2246         if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
2247                 struct qlnxr_cq *send_cq = get_qlnxr_cq(attrs->send_cq);
2248                 struct qlnxr_cq *recv_cq = get_qlnxr_cq(attrs->recv_cq);
2249
2250                 if ((send_cq->cq_type == QLNXR_CQ_TYPE_GSI) ||
2251                     (recv_cq->cq_type == QLNXR_CQ_TYPE_GSI)) {
2252                         QL_DPRINT11(ha, "consumer QP cannot use GSI CQs.\n");
2253                         return -EINVAL;
2254                 }
2255         }
2256         QL_DPRINT12(ha, "exit\n");
2257         return 0;
2258 }
2259
2260 static int
2261 qlnxr_copy_srq_uresp(struct qlnxr_dev *dev,
2262         struct qlnxr_srq *srq,
2263         struct ib_udata *udata)
2264 {
2265         struct qlnxr_create_srq_uresp   uresp;
2266         qlnx_host_t                     *ha;
2267         int                             rc;
2268
2269         ha = dev->ha;
2270
2271         QL_DPRINT12(ha, "enter\n");
2272
2273         memset(&uresp, 0, sizeof(uresp));
2274
2275         uresp.srq_id = srq->srq_id;
2276
2277         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2278
2279         QL_DPRINT12(ha, "exit [%d]\n", rc);
2280         return rc;
2281 }
2282
2283 static void
2284 qlnxr_copy_rq_uresp(struct qlnxr_dev *dev,
2285         struct qlnxr_create_qp_uresp *uresp,
2286         struct qlnxr_qp *qp)
2287 {
2288         qlnx_host_t     *ha;
2289
2290         ha = dev->ha;
2291
2292         /* Return if QP is associated with SRQ instead of RQ */
2293         QL_DPRINT12(ha, "enter qp->srq = %p\n", qp->srq);
2294
2295         if (qp->srq)
2296                 return;
2297
2298         /* iWARP requires two doorbells per RQ. */
2299         if (QLNX_IS_IWARP(dev)) {
2300
2301                 uresp->rq_db_offset =
2302                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2303                 uresp->rq_db2_offset =
2304                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2305
2306                 QL_DPRINT12(ha, "uresp->rq_db_offset = 0x%x "
2307                         "uresp->rq_db2_offset = 0x%x\n",
2308                         uresp->rq_db_offset, uresp->rq_db2_offset);
2309         } else {
2310                 uresp->rq_db_offset =
2311                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
2312         }
2313         uresp->rq_icid = qp->icid;
2314
2315         QL_DPRINT12(ha, "exit\n");
2316         return;
2317 }
2318
2319 static void
2320 qlnxr_copy_sq_uresp(struct qlnxr_dev *dev,
2321         struct qlnxr_create_qp_uresp *uresp,
2322         struct qlnxr_qp *qp)
2323 {
2324         qlnx_host_t     *ha;
2325
2326         ha = dev->ha;
2327
2328         QL_DPRINT12(ha, "enter\n");
2329
2330         uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2331
2332         /* iWARP uses the same cid for rq and sq */
2333         if (QLNX_IS_IWARP(dev)) {
2334                 uresp->sq_icid = qp->icid;
2335                 QL_DPRINT12(ha, "uresp->sq_icid = 0x%x\n", uresp->sq_icid);
2336         } else
2337                 uresp->sq_icid = qp->icid + 1;
2338
2339         QL_DPRINT12(ha, "exit\n");
2340         return;
2341 }
2342
2343 static int
2344 qlnxr_copy_qp_uresp(struct qlnxr_dev *dev,
2345         struct qlnxr_qp *qp,
2346         struct ib_udata *udata)
2347 {
2348         int                             rc;
2349         struct qlnxr_create_qp_uresp    uresp;
2350         qlnx_host_t                     *ha;
2351
2352         ha = dev->ha;
2353
2354         QL_DPRINT12(ha, "enter qp->icid =0x%x\n", qp->icid);
2355
2356         memset(&uresp, 0, sizeof(uresp));
2357         qlnxr_copy_sq_uresp(dev, &uresp, qp);
2358         qlnxr_copy_rq_uresp(dev, &uresp, qp);
2359
2360         uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
2361         uresp.qp_id = qp->qp_id;
2362
2363         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2364
2365         QL_DPRINT12(ha, "exit [%d]\n", rc);
2366         return rc;
2367 }
2368
2369
2370 static void
2371 qlnxr_set_common_qp_params(struct qlnxr_dev *dev,
2372         struct qlnxr_qp *qp,
2373         struct qlnxr_pd *pd,
2374         struct ib_qp_init_attr *attrs)
2375 {
2376         qlnx_host_t                     *ha;
2377
2378         ha = dev->ha;
2379
2380         QL_DPRINT12(ha, "enter\n");
2381
2382         spin_lock_init(&qp->q_lock);
2383
2384         atomic_set(&qp->refcnt, 1);
2385         qp->pd = pd;
2386         qp->sig = QLNXR_QP_MAGIC_NUMBER;
2387         qp->qp_type = attrs->qp_type;
2388         qp->max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2389         qp->sq.max_sges = attrs->cap.max_send_sge;
2390         qp->state = ECORE_ROCE_QP_STATE_RESET;
2391         qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
2392         qp->sq_cq = get_qlnxr_cq(attrs->send_cq);
2393         qp->rq_cq = get_qlnxr_cq(attrs->recv_cq);
2394         qp->dev = dev;
2395
2396         if (!attrs->srq) {
2397                 /* QP is associated with RQ instead of SRQ */
2398                 qp->rq.max_sges = attrs->cap.max_recv_sge;
2399                 QL_DPRINT12(ha, "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
2400                         qp->rq.max_sges, qp->rq_cq->icid);
2401         } else {
2402                 qp->srq = get_qlnxr_srq(attrs->srq);
2403         }
2404
2405         QL_DPRINT12(ha,
2406                 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d,"
2407                 " state = %d, signaled = %d, use_srq=%d\n",
2408                 pd->pd_id, qp->qp_type, qp->max_inline_data,
2409                 qp->state, qp->signaled, ((attrs->srq) ? 1 : 0));
2410         QL_DPRINT12(ha, "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
2411                 qp->sq.max_sges, qp->sq_cq->icid);
2412         return;
2413 }
2414
2415 static int
2416 qlnxr_check_srq_params(struct ib_pd *ibpd,
2417         struct qlnxr_dev *dev,
2418         struct ib_srq_init_attr *attrs)
2419 {
2420         struct ecore_rdma_device *qattr;
2421         qlnx_host_t             *ha;
2422
2423         ha = dev->ha;
2424         qattr = ecore_rdma_query_device(dev->rdma_ctx);
2425
2426         QL_DPRINT12(ha, "enter\n");
2427
2428         if (attrs->attr.max_wr > qattr->max_srq_wqe) {
2429                 QL_DPRINT12(ha, "unsupported srq_wr=0x%x"
2430                         " requested (max_srq_wqe=0x%x)\n",
2431                         attrs->attr.max_wr, qattr->max_srq_wqe);
2432                 return -EINVAL;
2433         }
2434
2435         if (attrs->attr.max_sge > qattr->max_sge) {
2436                 QL_DPRINT12(ha,
2437                         "unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
2438                         attrs->attr.max_sge, qattr->max_sge);
2439                 return -EINVAL;
2440         }
2441
2442         if (attrs->attr.srq_limit > attrs->attr.max_wr) {
2443                 QL_DPRINT12(ha,
2444                        "unsupported srq_limit=0x%x requested"
2445                         " (max_srq_limit=0x%x)\n",
2446                         attrs->attr.srq_limit, attrs->attr.max_wr);
2447                 return -EINVAL;
2448         }
2449
2450         QL_DPRINT12(ha, "exit\n");
2451         return 0;
2452 }
2453
2454
2455 static void
2456 qlnxr_free_srq_user_params(struct qlnxr_srq *srq)
2457 {
2458         struct qlnxr_dev        *dev = srq->dev;
2459         qlnx_host_t             *ha;
2460
2461         ha = dev->ha;
2462
2463         QL_DPRINT12(ha, "enter\n");
2464
2465         qlnxr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
2466         ib_umem_release(srq->usrq.umem);
2467         ib_umem_release(srq->prod_umem);
2468
2469         QL_DPRINT12(ha, "exit\n");
2470         return;
2471 }
2472
2473 static void
2474 qlnxr_free_srq_kernel_params(struct qlnxr_srq *srq)
2475 {
2476         struct qlnxr_srq_hwq_info *hw_srq  = &srq->hw_srq;
2477         struct qlnxr_dev        *dev = srq->dev;
2478         qlnx_host_t             *ha;
2479
2480         ha = dev->ha;
2481
2482         QL_DPRINT12(ha, "enter\n");
2483
2484         ecore_chain_free(dev->cdev, &hw_srq->pbl);
2485
2486         qlnx_dma_free_coherent(&dev->cdev,
2487                 hw_srq->virt_prod_pair_addr,
2488                 hw_srq->phy_prod_pair_addr,
2489                 sizeof(struct rdma_srq_producers));
2490
2491         QL_DPRINT12(ha, "exit\n");
2492
2493         return;
2494 }
2495
2496 static int
2497 qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
2498         struct qlnxr_srq *srq,
2499         struct qlnxr_create_srq_ureq *ureq,
2500         int access, int dmasync)
2501 {
2502 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
2503         struct ib_umem_chunk    *chunk;
2504 #endif
2505         struct scatterlist      *sg;
2506         int                     rc;
2507         struct qlnxr_dev        *dev = srq->dev;
2508         qlnx_host_t             *ha;
2509
2510         ha = dev->ha;
2511
2512         QL_DPRINT12(ha, "enter\n");
2513
2514         rc = qlnxr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
2515                                   ureq->srq_len, access, dmasync, 1);
2516         if (rc)
2517                 return rc;
2518
2519         srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
2520                                      sizeof(struct rdma_srq_producers),
2521                                      access, dmasync);
2522         if (IS_ERR(srq->prod_umem)) {
2523
2524                 qlnxr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
2525                 ib_umem_release(srq->usrq.umem);
2526
2527                 QL_DPRINT12(ha, "ib_umem_get failed for producer [%lx]\n",
2528                         PTR_ERR(srq->prod_umem));
2529
2530                 return PTR_ERR(srq->prod_umem);
2531         }
2532
2533 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
2534         chunk = container_of((&srq->prod_umem->chunk_list)->next,
2535                              typeof(*chunk), list);
2536         sg = &chunk->page_list[0];
2537 #else
2538         sg = srq->prod_umem->sg_head.sgl;
2539 #endif
2540         srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
2541
2542         QL_DPRINT12(ha, "exit\n");
2543         return 0;
2544 }
2545
2546
2547 static int
2548 qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
2549         struct qlnxr_dev *dev,
2550         struct ib_srq_init_attr *init_attr)
2551 {
2552         struct qlnxr_srq_hwq_info       *hw_srq  = &srq->hw_srq;
2553         dma_addr_t                      phy_prod_pair_addr;
2554         u32                             num_elems, max_wr;
2555         void                            *va;
2556         int                             rc;
2557         qlnx_host_t                     *ha;
2558
2559         ha = dev->ha;
2560
2561         QL_DPRINT12(ha, "enter\n");
2562
2563         va = qlnx_dma_alloc_coherent(&dev->cdev,
2564                         &phy_prod_pair_addr,
2565                         sizeof(struct rdma_srq_producers));
2566         if (!va) {
2567                 QL_DPRINT11(ha, "qlnx_dma_alloc_coherent failed for producer\n");
2568                 return -ENOMEM;
2569         }
2570
2571         hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
2572         hw_srq->virt_prod_pair_addr = va;
2573
2574         max_wr = init_attr->attr.max_wr;
2575
2576         num_elems = max_wr * RDMA_MAX_SRQ_WQE_SIZE;
2577
2578         rc = ecore_chain_alloc(dev->cdev,
2579                    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2580                    ECORE_CHAIN_MODE_PBL,
2581                    ECORE_CHAIN_CNT_TYPE_U32,
2582                    num_elems,
2583                    ECORE_RDMA_SRQ_WQE_ELEM_SIZE,
2584                    &hw_srq->pbl, NULL);
2585
2586         if (rc) {
2587                 QL_DPRINT11(ha, "ecore_chain_alloc failed [%d]\n", rc);
2588                 goto err0;
2589         }
2590
2591         hw_srq->max_wr = max_wr;
2592         hw_srq->num_elems = num_elems;
2593         hw_srq->max_sges = RDMA_MAX_SGE_PER_SRQ;
2594
2595         QL_DPRINT12(ha, "exit\n");
2596         return 0;
2597
2598 err0:
2599         qlnx_dma_free_coherent(&dev->cdev, va, phy_prod_pair_addr,
2600                 sizeof(struct rdma_srq_producers));
2601
2602         QL_DPRINT12(ha, "exit [%d]\n", rc);
2603         return rc;
2604 }
2605
2606 static inline void
2607 qlnxr_init_common_qp_in_params(struct qlnxr_dev *dev,
2608         struct qlnxr_pd *pd,
2609         struct qlnxr_qp *qp,
2610         struct ib_qp_init_attr *attrs,
2611         bool fmr_and_reserved_lkey,
2612         struct ecore_rdma_create_qp_in_params *params)
2613 {
2614         qlnx_host_t     *ha;
2615
2616         ha = dev->ha;
2617
2618         QL_DPRINT12(ha, "enter\n");
2619
2620         /* QP handle to be written in an async event */
2621         params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
2622         params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
2623
2624         params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
2625         params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
2626         params->pd = pd->pd_id;
2627         params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
2628         params->sq_cq_id = get_qlnxr_cq(attrs->send_cq)->icid;
2629         params->stats_queue = 0;
2630
2631         params->rq_cq_id = get_qlnxr_cq(attrs->recv_cq)->icid;
2632
2633         if (qp->srq) {
2634                 /* QP is associated with SRQ instead of RQ */
2635                 params->srq_id = qp->srq->srq_id;
2636                 params->use_srq = true;
2637                 QL_DPRINT11(ha, "exit srq_id = 0x%x use_srq = 0x%x\n",
2638                         params->srq_id, params->use_srq);
2639                 return;
2640         }
2641
2642         params->srq_id = 0;
2643         params->use_srq = false;
2644
2645         QL_DPRINT12(ha, "exit\n");
2646         return;
2647 }
2648
2649
2650 static inline void
2651 qlnxr_qp_user_print( struct qlnxr_dev *dev,
2652         struct qlnxr_qp *qp)
2653 {
2654         QL_DPRINT12((dev->ha), "qp=%p. sq_addr=0x%llx, sq_len=%zd, "
2655                 "rq_addr=0x%llx, rq_len=%zd\n",
2656                 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
2657                 qp->urq.buf_len);
2658         return;
2659 }
2660
2661 static int
2662 qlnxr_idr_add(struct qlnxr_dev *dev, void *ptr, u32 id)
2663 {
2664         u32             newid;
2665         int             rc;
2666         qlnx_host_t     *ha;
2667
2668         ha = dev->ha;
2669
2670         QL_DPRINT12(ha, "enter\n");
2671
2672         if (!QLNX_IS_IWARP(dev))
2673                 return 0;
2674
2675         do {
2676                 if (!idr_pre_get(&dev->qpidr, GFP_KERNEL)) {
2677                         QL_DPRINT11(ha, "idr_pre_get failed\n");
2678                         return -ENOMEM;
2679                 }
2680
2681                 mtx_lock(&dev->idr_lock);
2682
2683                 rc = idr_get_new_above(&dev->qpidr, ptr, id, &newid);
2684
2685                 mtx_unlock(&dev->idr_lock);
2686
2687         } while (rc == -EAGAIN);
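        /* idr_get_new_above() consumes memory preallocated by idr_pre_get()
         * and returns -EAGAIN when it runs out (e.g. if a concurrent insert
         * used it up first), hence the preallocate-and-retry loop above.
         */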
2688
2689         QL_DPRINT12(ha, "exit [%d]\n", rc);
2690
2691         return rc;
2692 }
2693
2694 static void
2695 qlnxr_idr_remove(struct qlnxr_dev *dev, u32 id)
2696 {
2697         qlnx_host_t     *ha;
2698
2699         ha = dev->ha;
2700
2701         QL_DPRINT12(ha, "enter\n");
2702
2703         if (!QLNX_IS_IWARP(dev))
2704                 return;
2705
2706         mtx_lock(&dev->idr_lock);
2707         idr_remove(&dev->qpidr, id);
2708         mtx_unlock(&dev->idr_lock);
2709
2710         QL_DPRINT12(ha, "exit \n");
2711
2712         return;
2713 }
2714
2715 static inline void
2716 qlnxr_iwarp_populate_user_qp(struct qlnxr_dev *dev,
2717         struct qlnxr_qp *qp,
2718         struct ecore_rdma_create_qp_out_params *out_params)
2719 {
2720         qlnx_host_t     *ha;
2721
2722         ha = dev->ha;
2723
2724         QL_DPRINT12(ha, "enter\n");
2725
2726         qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
2727         qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
2728
2729         qlnxr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
2730                            &qp->usq.pbl_info);
2731
2732         if (qp->srq) {
2733                 QL_DPRINT11(ha, "qp->srq = %p\n", qp->srq);
2734                 return;
2735         }
2736
2737         qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
2738         qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
2739
2740         qlnxr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
2741                            &qp->urq.pbl_info);
2742
2743         QL_DPRINT12(ha, "exit\n");
2744         return;
2745 }
2746
2747 static int
2748 qlnxr_create_user_qp(struct qlnxr_dev *dev,
2749         struct qlnxr_qp *qp,
2750         struct ib_pd *ibpd,
2751         struct ib_udata *udata,
2752         struct ib_qp_init_attr *attrs)
2753 {
2754         struct ecore_rdma_destroy_qp_out_params d_out_params;
2755         struct ecore_rdma_create_qp_in_params in_params;
2756         struct ecore_rdma_create_qp_out_params out_params;
2757         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
2758         struct ib_ucontext *ib_ctx = NULL;
2759         struct qlnxr_ucontext *ctx = NULL;
2760         struct qlnxr_create_qp_ureq ureq;
2761         int alloc_and_init = QLNX_IS_ROCE(dev);
2762         int rc = -EINVAL;
2763         qlnx_host_t     *ha;
2764
2765         ha = dev->ha;
2766
2767         QL_DPRINT12(ha, "enter\n");
2768
2769         ib_ctx = ibpd->uobject->context;
2770         ctx = get_qlnxr_ucontext(ib_ctx);
2771
2772         memset(&ureq, 0, sizeof(ureq));
2773         rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
2774
2775         if (rc) {
2776                 QL_DPRINT11(ha, "ib_copy_from_udata failed [%d]\n", rc);
2777                 return rc;
2778         }
2779
2780         /* SQ - read access only (0), dma sync not required (0) */
2781         rc = qlnxr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
2782                                   ureq.sq_len, 0, 0,
2783                                   alloc_and_init);
2784         if (rc) {
2785                 QL_DPRINT11(ha, "qlnxr_init_user_queue failed [%d]\n", rc);
2786                 return rc;
2787         }
2788
2789         if (!qp->srq) {
2790                 /* RQ - read access only (0), dma sync not required (0) */
2791                 rc = qlnxr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
2792                                           ureq.rq_len, 0, 0,
2793                                           alloc_and_init);
2794
2795                 if (rc) {
2796                         QL_DPRINT11(ha, "qlnxr_init_user_queue failed [%d]\n", rc);
2797                         return rc;
2798                 }
2799         }
2800
2801         memset(&in_params, 0, sizeof(in_params));
2802         qlnxr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
2803         in_params.qp_handle_lo = ureq.qp_handle_lo;
2804         in_params.qp_handle_hi = ureq.qp_handle_hi;
2805         in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
2806         in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
2807
2808         if (!qp->srq) {
2809                 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
2810                 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
2811         }
2812
2813         qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, &in_params, &out_params);
2814
2815         if (!qp->ecore_qp) {
2816                 rc = -ENOMEM;
2817                 QL_DPRINT11(ha, "ecore_rdma_create_qp failed\n");
2818                 goto err1;
2819         }
2820
2821         if (QLNX_IS_IWARP(dev))
2822                 qlnxr_iwarp_populate_user_qp(dev, qp, &out_params);
2823
2824         qp->qp_id = out_params.qp_id;
2825         qp->icid = out_params.icid;
2826
2827         rc = qlnxr_copy_qp_uresp(dev, qp, udata);
2828
2829         if (rc) {
2830                 QL_DPRINT11(ha, "qlnxr_copy_qp_uresp failed\n");
2831                 goto err;
2832         }
2833
2834         qlnxr_qp_user_print(dev, qp);
2835
2836         QL_DPRINT12(ha, "exit\n");
2837         return 0;
2838 err:
2839         rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp, &d_out_params);
2840
2841         if (rc)
2842                 QL_DPRINT12(ha, "fatal fault\n");
2843
2844 err1:
2845         qlnxr_cleanup_user(dev, qp);
2846
2847         QL_DPRINT12(ha, "exit[%d]\n", rc);
2848         return rc;
2849 }
2850
2851 static void
2852 qlnxr_set_roce_db_info(struct qlnxr_dev *dev,
2853         struct qlnxr_qp *qp)
2854 {
2855         qlnx_host_t     *ha;
2856
2857         ha = dev->ha;
2858
2859         QL_DPRINT12(ha, "enter qp = %p qp->srq %p\n", qp, qp->srq);
2860
2861         qp->sq.db = dev->db_addr +
2862                 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2863         qp->sq.db_data.data.icid = qp->icid + 1;
2864
2865         if (!qp->srq) {
2866                 qp->rq.db = dev->db_addr +
2867                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
2868                 qp->rq.db_data.data.icid = qp->icid;
2869         }
2870
2871         QL_DPRINT12(ha, "exit\n");
2872         return;
2873 }
2874
2875 static void
2876 qlnxr_set_iwarp_db_info(struct qlnxr_dev *dev,
2877         struct qlnxr_qp *qp)
2878
2879 {
2880         qlnx_host_t     *ha;
2881
2882         ha = dev->ha;
2883
2884         QL_DPRINT12(ha, "enter qp = %p qp->srq %p\n", qp, qp->srq);
2885
2886         qp->sq.db = dev->db_addr +
2887                 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2888         qp->sq.db_data.data.icid = qp->icid;
2889
2890         if (!qp->srq) {
2891                 qp->rq.db = dev->db_addr +
2892                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2893                 qp->rq.db_data.data.icid = qp->icid;
2894
2895                 qp->rq.iwarp_db2 = dev->db_addr +
2896                         DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2897                 qp->rq.iwarp_db2_data.data.icid = qp->icid;
2898                 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
2899         }
2900
2901         QL_DPRINT12(ha,
2902                 "qp->sq.db = %p qp->sq.db_data.data.icid =0x%x\n"
2903                 "\t\t\tqp->rq.db = %p qp->rq.db_data.data.icid =0x%x\n"
2904                 "\t\t\tqp->rq.iwarp_db2 = %p qp->rq.iwarp_db2.data.icid =0x%x"
2905                 " qp->rq.iwarp_db2.data.prod_val =0x%x\n",
2906                 qp->sq.db, qp->sq.db_data.data.icid,
2907                 qp->rq.db, qp->rq.db_data.data.icid,
2908                 qp->rq.iwarp_db2, qp->rq.iwarp_db2_data.data.icid,
2909                 qp->rq.iwarp_db2_data.data.value);
2910
2911         QL_DPRINT12(ha, "exit\n");
2912         return;
2913 }
2914
2915 static int
2916 qlnxr_roce_create_kernel_qp(struct qlnxr_dev *dev,
2917         struct qlnxr_qp *qp,
2918         struct ecore_rdma_create_qp_in_params *in_params,
2919         u32 n_sq_elems,
2920         u32 n_rq_elems)
2921 {
2922         struct ecore_rdma_create_qp_out_params out_params;
2923         int             rc;
2924         qlnx_host_t     *ha;
2925
2926         ha = dev->ha;
2927
2928         QL_DPRINT12(ha, "enter\n");
2929
2930         rc = ecore_chain_alloc(
2931                 dev->cdev,
2932                 ECORE_CHAIN_USE_TO_PRODUCE,
2933                 ECORE_CHAIN_MODE_PBL,
2934                 ECORE_CHAIN_CNT_TYPE_U32,
2935                 n_sq_elems,
2936                 QLNXR_SQE_ELEMENT_SIZE,
2937                 &qp->sq.pbl,
2938                 NULL);
2939
2940         if (rc) {
2941                 QL_DPRINT11(ha, "ecore_chain_alloc qp->sq.pbl failed[%d]\n", rc);
2942                 return rc;
2943         }
2944
2945         in_params->sq_num_pages = ecore_chain_get_page_cnt(&qp->sq.pbl);
2946         in_params->sq_pbl_ptr = ecore_chain_get_pbl_phys(&qp->sq.pbl);
2947
2948         if (!qp->srq) {
2949
2950                 rc = ecore_chain_alloc(
2951                         dev->cdev,
2952                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2953                         ECORE_CHAIN_MODE_PBL,
2954                         ECORE_CHAIN_CNT_TYPE_U32,
2955                         n_rq_elems,
2956                         QLNXR_RQE_ELEMENT_SIZE,
2957                         &qp->rq.pbl,
2958                         NULL);
2959
2960                 if (rc) {
2961                         QL_DPRINT11(ha,
2962                                 "ecore_chain_alloc qp->rq.pbl failed[%d]\n", rc);
2963                         return rc;
2964                 }
2965
2966                 in_params->rq_num_pages = ecore_chain_get_page_cnt(&qp->rq.pbl);
2967                 in_params->rq_pbl_ptr = ecore_chain_get_pbl_phys(&qp->rq.pbl);
2968         }
2969
2970         qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, in_params, &out_params);
2971
2972         if (!qp->ecore_qp) {
2973                 QL_DPRINT11(ha, "qp->ecore_qp == NULL\n");
2974                 return -EINVAL;
2975         }
2976
2977         qp->qp_id = out_params.qp_id;
2978         qp->icid = out_params.icid;
2979
2980         qlnxr_set_roce_db_info(dev, qp);
2981
2982         QL_DPRINT12(ha, "exit\n");
2983         return 0;
2984 }
2985
2986 static int
2987 qlnxr_iwarp_create_kernel_qp(struct qlnxr_dev *dev,
2988         struct qlnxr_qp *qp,
2989         struct ecore_rdma_create_qp_in_params *in_params,
2990         u32 n_sq_elems,
2991         u32 n_rq_elems)
2992 {
2993         struct ecore_rdma_destroy_qp_out_params d_out_params;
2994         struct ecore_rdma_create_qp_out_params out_params;
2995         struct ecore_chain_ext_pbl ext_pbl;
2996         int rc;
2997         qlnx_host_t     *ha;
2998
2999         ha = dev->ha;
3000
3001         QL_DPRINT12(ha, "enter\n");
3002
3003         in_params->sq_num_pages = ECORE_CHAIN_PAGE_CNT(n_sq_elems,
3004                                                      QLNXR_SQE_ELEMENT_SIZE,
3005                                                      ECORE_CHAIN_MODE_PBL);
3006         in_params->rq_num_pages = ECORE_CHAIN_PAGE_CNT(n_rq_elems,
3007                                                      QLNXR_RQE_ELEMENT_SIZE,
3008                                                      ECORE_CHAIN_MODE_PBL);
3009
3010         QL_DPRINT12(ha, "n_sq_elems = 0x%x"
3011                 " n_rq_elems = 0x%x in_params\n"
3012                 "\t\t\tqp_handle_lo\t\t= 0x%08x\n"
3013                 "\t\t\tqp_handle_hi\t\t= 0x%08x\n"
3014                 "\t\t\tqp_handle_async_lo\t\t= 0x%08x\n"
3015                 "\t\t\tqp_handle_async_hi\t\t= 0x%08x\n"
3016                 "\t\t\tuse_srq\t\t\t= 0x%x\n"
3017                 "\t\t\tsignal_all\t\t= 0x%x\n"
3018                 "\t\t\tfmr_and_reserved_lkey\t= 0x%x\n"
3019                 "\t\t\tpd\t\t\t= 0x%x\n"
3020                 "\t\t\tdpi\t\t\t= 0x%x\n"
3021                 "\t\t\tsq_cq_id\t\t\t= 0x%x\n"
3022                 "\t\t\tsq_num_pages\t\t= 0x%x\n"
3023                 "\t\t\tsq_pbl_ptr\t\t= %p\n"
3024                 "\t\t\tmax_sq_sges\t\t= 0x%x\n"
3025                 "\t\t\trq_cq_id\t\t\t= 0x%x\n"
3026                 "\t\t\trq_num_pages\t\t= 0x%x\n"
3027                 "\t\t\trq_pbl_ptr\t\t= %p\n"
3028                 "\t\t\tsrq_id\t\t\t= 0x%x\n"
3029                 "\t\t\tstats_queue\t\t= 0x%x\n",
3030                 n_sq_elems, n_rq_elems,
3031                 in_params->qp_handle_lo,
3032                 in_params->qp_handle_hi,
3033                 in_params->qp_handle_async_lo,
3034                 in_params->qp_handle_async_hi,
3035                 in_params->use_srq,
3036                 in_params->signal_all,
3037                 in_params->fmr_and_reserved_lkey,
3038                 in_params->pd,
3039                 in_params->dpi,
3040                 in_params->sq_cq_id,
3041                 in_params->sq_num_pages,
3042                 (void *)in_params->sq_pbl_ptr,
3043                 in_params->max_sq_sges,
3044                 in_params->rq_cq_id,
3045                 in_params->rq_num_pages,
3046                 (void *)in_params->rq_pbl_ptr,
3047                 in_params->srq_id,
3048                 in_params->stats_queue );
3049
3050         memset(&out_params, 0, sizeof (struct ecore_rdma_create_qp_out_params));
3051         memset(&ext_pbl, 0, sizeof (struct ecore_chain_ext_pbl));
3052
3053         qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, in_params, &out_params);
3054
3055         if (!qp->ecore_qp) {
3056                 QL_DPRINT11(ha, "ecore_rdma_create_qp failed\n");
3057                 return -EINVAL;
3058         }
3059
3060         /* Now we allocate the chain */
3061         ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
3062         ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
3063
3064         QL_DPRINT12(ha, "ext_pbl.p_pbl_virt = %p "
3065                 "ext_pbl.p_pbl_phys = %p\n",
3066                 ext_pbl.p_pbl_virt, ext_pbl.p_pbl_phys);
3067                 
3068         rc = ecore_chain_alloc(
3069                 dev->cdev,
3070                 ECORE_CHAIN_USE_TO_PRODUCE,
3071                 ECORE_CHAIN_MODE_PBL,
3072                 ECORE_CHAIN_CNT_TYPE_U32,
3073                 n_sq_elems,
3074                 QLNXR_SQE_ELEMENT_SIZE,
3075                 &qp->sq.pbl,
3076                 &ext_pbl);
3077
3078         if (rc) {
3079                 QL_DPRINT11(ha,
3080                         "ecore_chain_alloc qp->sq.pbl failed rc = %d\n", rc);
3081                 goto err;
3082         }
3083
3084         ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
3085         ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
3086
3087         QL_DPRINT12(ha, "ext_pbl.p_pbl_virt = %p "
3088                 "ext_pbl.p_pbl_phys = %p\n",
3089                 ext_pbl.p_pbl_virt, ext_pbl.p_pbl_phys);
3090
3091         if (!qp->srq) {
3092
3093                 rc = ecore_chain_alloc(
3094                         dev->cdev,
3095                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
3096                         ECORE_CHAIN_MODE_PBL,
3097                         ECORE_CHAIN_CNT_TYPE_U32,
3098                         n_rq_elems,
3099                         QLNXR_RQE_ELEMENT_SIZE,
3100                         &qp->rq.pbl,
3101                         &ext_pbl);
3102
3103                 if (rc) {
3104                         QL_DPRINT11(ha, "ecore_chain_alloc qp->rq.pbl"
3105                                 " failed rc = %d\n", rc);
3106                         goto err;
3107                 }
3108         }
3109
3110         QL_DPRINT12(ha, "qp_id = 0x%x icid =0x%x\n",
3111                 out_params.qp_id, out_params.icid);
3112
3113         qp->qp_id = out_params.qp_id;
3114         qp->icid = out_params.icid;
3115
3116         qlnxr_set_iwarp_db_info(dev, qp);
3117
3118         QL_DPRINT12(ha, "exit\n");
3119         return 0;
3120
3121 err:
3122         ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp, &d_out_params);
3123
3124         QL_DPRINT12(ha, "exit rc = %d\n", rc);
3125         return rc;
3126 }
3127
3128 static int
3129 qlnxr_create_kernel_qp(struct qlnxr_dev *dev,
3130         struct qlnxr_qp *qp,
3131         struct ib_pd *ibpd,
3132         struct ib_qp_init_attr *attrs)
3133 {
3134         struct ecore_rdma_create_qp_in_params in_params;
3135         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
3136         int rc = -EINVAL;
3137         u32 n_rq_elems;
3138         u32 n_sq_elems;
3139         u32 n_sq_entries;
3140         struct ecore_rdma_device *qattr = ecore_rdma_query_device(dev->rdma_ctx);
3141         qlnx_host_t     *ha;
3142
3143         ha = dev->ha;
3144
3145         QL_DPRINT12(ha, "enter\n");
3146
3147         memset(&in_params, 0, sizeof(in_params));
3148
3149         /* A single work request may take up to MAX_SQ_WQE_SIZE elements in
3150          * the ring. The ring should allow at least a single WR, even if the
3151          * user requested none, due to allocation issues.
3152          * We should add an extra WR since the prod and cons indices of
3153          * wqe_wr_id are managed in such a way that the WQ is considered full
3154          * when (prod+1)%max_wr==cons. We currently don't do that because we
3155          * double the number of entries due to an iSER issue that pushes far
3156          * more WRs than indicated. If we declined its ib_post_send() we would
3157          * get error prints in dmesg that we'd like to avoid.
3158          */
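         /*
          * Illustrative example of the sizing above (hypothetical values, not
          * taken from any particular adapter): attrs->cap.max_send_wr = 128
          * with dev->wq_multiplier = 2 sizes the SQ for 256 WRs, capped by
          * qattr->max_wqe. The "full" test referred to above is
          * (prod + 1) % max_wr == cons; e.g. max_wr = 4, prod = 3, cons = 0
          * is reported full by qlnxr_wq_is_full().
          */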
3159         qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
3160                               qattr->max_wqe);
3161
3162         qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
3163                         GFP_KERNEL);
3164         if (!qp->wqe_wr_id) {
3165                 QL_DPRINT11(ha, "failed SQ shadow memory allocation\n");
3166                 return -ENOMEM;
3167         }
3168
3169         /* QP handle to be written in CQE */
3170         in_params.qp_handle_lo = lower_32_bits((uintptr_t)qp);
3171         in_params.qp_handle_hi = upper_32_bits((uintptr_t)qp);
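         /*
          * Example of the split (purely hypothetical address): for
          * qp == (void *)0x0000fffe12345678, qp_handle_lo = 0x12345678 and
          * qp_handle_hi = 0x0000fffe. Both halves travel in the CQE and can
          * be recombined, e.g. via HILO_U64(hi, lo), to recover the qp
          * pointer on the completion path.
          */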
3172
3173         /* A single work request may take up to MAX_RQ_WQE_SIZE elements in
3174          * the ring. The ring should allow at least a single WR, even if the
3175          * user requested none, due to allocation issues.
3176          */
3177         qp->rq.max_wr = (u16)max_t(u32, attrs->cap.max_recv_wr, 1);
3178
3179         /* Allocate driver internal RQ array */
3180         if (!qp->srq) {
3181                 qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
3182                                         GFP_KERNEL);
3183                 if (!qp->rqe_wr_id) {
3184                         QL_DPRINT11(ha, "failed RQ shadow memory allocation\n");
3185                         kfree(qp->wqe_wr_id);
3186                         return -ENOMEM;
3187                 }
3188         }
3189
3190         //qlnxr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
3191
3192         in_params.qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
3193         in_params.qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
3194
3195         in_params.signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
3196         in_params.fmr_and_reserved_lkey = true;
3197         in_params.pd = pd->pd_id;
3198         in_params.dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
3199         in_params.sq_cq_id = get_qlnxr_cq(attrs->send_cq)->icid;
3200         in_params.stats_queue = 0;
3201
3202         in_params.rq_cq_id = get_qlnxr_cq(attrs->recv_cq)->icid;
3203
3204         if (qp->srq) {
3205                 /* QP is associated with SRQ instead of RQ */
3206                 in_params.srq_id = qp->srq->srq_id;
3207                 in_params.use_srq = true;
3208                 QL_DPRINT11(ha, "srq_id = 0x%x use_srq = 0x%x\n",
3209                         in_params.srq_id, in_params.use_srq);
3210         } else {
3211                 in_params.srq_id = 0;
3212                 in_params.use_srq = false;
3213         }
3214
3215         n_sq_entries = attrs->cap.max_send_wr;
3216         n_sq_entries = min_t(u32, n_sq_entries, qattr->max_wqe);
3217         n_sq_entries = max_t(u32, n_sq_entries, 1);
3218         n_sq_elems = n_sq_entries * QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
3219
3220         n_rq_elems = qp->rq.max_wr * QLNXR_MAX_RQE_ELEMENTS_PER_RQE;
3221
3222         if (QLNX_IS_ROCE(dev)) {
3223                 rc = qlnxr_roce_create_kernel_qp(dev, qp, &in_params,
3224                                                 n_sq_elems, n_rq_elems);
3225         } else {
3226                 rc = qlnxr_iwarp_create_kernel_qp(dev, qp, &in_params,
3227                                                  n_sq_elems, n_rq_elems);
3228         }
3229
3230         if (rc)
3231                 qlnxr_cleanup_kernel(dev, qp);
3232
3233         QL_DPRINT12(ha, "exit [%d]\n", rc);
3234         return rc;
3235 }
3236
3237 struct ib_qp *
3238 qlnxr_create_qp(struct ib_pd *ibpd,
3239                 struct ib_qp_init_attr *attrs,
3240                 struct ib_udata *udata)
3241 {
3242         struct qlnxr_dev *dev = get_qlnxr_dev(ibpd->device);
3243         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
3244         struct qlnxr_qp *qp;
3245         int rc = 0;
3246         qlnx_host_t     *ha;
3247
3248         ha = dev->ha;
3249
3250         QL_DPRINT12(ha, "enter\n");
3251
3252         rc = qlnxr_check_qp_attrs(ibpd, dev, attrs, udata);
3253         if (rc) {
3254                 QL_DPRINT11(ha, "qlnxr_check_qp_attrs failed [%d]\n", rc);
3255                 return ERR_PTR(rc);
3256         }
3257
3258         QL_DPRINT12(ha, "called from %s, event_handle=%p,"
3259                 " eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
3260                 (udata ? "user library" : "kernel"),
3261                 attrs->event_handler, pd,
3262                 get_qlnxr_cq(attrs->send_cq),
3263                 get_qlnxr_cq(attrs->send_cq)->icid,
3264                 get_qlnxr_cq(attrs->recv_cq),
3265                 get_qlnxr_cq(attrs->recv_cq)->icid);
3266
3267         qp = qlnx_zalloc(sizeof(struct qlnxr_qp));
3268
3269         if (!qp) {
3270                 QL_DPRINT11(ha, "qlnx_zalloc(qp) failed\n");
3271                 return ERR_PTR(-ENOMEM);
3272         }
3273
3274         qlnxr_set_common_qp_params(dev, qp, pd, attrs);
3275
3276         if (attrs->qp_type == IB_QPT_GSI) {
3277                 QL_DPRINT11(ha, "calling qlnxr_create_gsi_qp\n");
3278                 return qlnxr_create_gsi_qp(dev, attrs, qp);
3279         }
3280
3281         if (udata) {
3282                 rc = qlnxr_create_user_qp(dev, qp, ibpd, udata, attrs);
3283
3284                 if (rc) {
3285                         QL_DPRINT11(ha, "qlnxr_create_user_qp failed\n");
3286                         goto err;
3287                 }
3288         } else {
3289                 rc = qlnxr_create_kernel_qp(dev, qp, ibpd, attrs);
3290
3291                 if (rc) {
3292                         QL_DPRINT11(ha, "qlnxr_create_kernel_qp failed\n");
3293                         goto err;
3294                 }
3295         }
3296
3297         qp->ibqp.qp_num = qp->qp_id;
3298
3299         rc = qlnxr_idr_add(dev, qp, qp->qp_id);
3300
3301         if (rc) {
3302                 QL_DPRINT11(ha, "qlnxr_idr_add failed\n");
3303                 goto err;
3304         }
3305
3306         QL_DPRINT12(ha, "exit [%p]\n", &qp->ibqp);
3307
3308         return &qp->ibqp;
3309 err:
3310         kfree(qp);
3311
3312         QL_DPRINT12(ha, "failed exit\n");
3313         return ERR_PTR(-EFAULT);
3314 }
3315
3316
3317 static enum ib_qp_state
3318 qlnxr_get_ibqp_state(enum ecore_roce_qp_state qp_state)
3319 {
3320         enum ib_qp_state state = IB_QPS_ERR;
3321
3322         switch (qp_state) {
3323         case ECORE_ROCE_QP_STATE_RESET:
3324                 state = IB_QPS_RESET;
3325                 break;
3326
3327         case ECORE_ROCE_QP_STATE_INIT:
3328                 state = IB_QPS_INIT;
3329                 break;
3330
3331         case ECORE_ROCE_QP_STATE_RTR:
3332                 state = IB_QPS_RTR;
3333                 break;
3334
3335         case ECORE_ROCE_QP_STATE_RTS:
3336                 state = IB_QPS_RTS;
3337                 break;
3338
3339         case ECORE_ROCE_QP_STATE_SQD:
3340                 state = IB_QPS_SQD;
3341                 break;
3342
3343         case ECORE_ROCE_QP_STATE_ERR:
3344                 state = IB_QPS_ERR;
3345                 break;
3346
3347         case ECORE_ROCE_QP_STATE_SQE:
3348                 state = IB_QPS_SQE;
3349                 break;
3350         }
3351         return state;
3352 }
3353
3354 static enum ecore_roce_qp_state
3355 qlnxr_get_state_from_ibqp( enum ib_qp_state qp_state)
3356 {
3357         enum ecore_roce_qp_state ecore_qp_state;
3358
3359         ecore_qp_state = ECORE_ROCE_QP_STATE_ERR;
3360
3361         switch (qp_state) {
3362         case IB_QPS_RESET:
3363                 ecore_qp_state =  ECORE_ROCE_QP_STATE_RESET;
3364                 break;
3365
3366         case IB_QPS_INIT:
3367                 ecore_qp_state =  ECORE_ROCE_QP_STATE_INIT;
3368                 break;
3369
3370         case IB_QPS_RTR:
3371                 ecore_qp_state =  ECORE_ROCE_QP_STATE_RTR;
3372                 break;
3373
3374         case IB_QPS_RTS:
3375                 ecore_qp_state =  ECORE_ROCE_QP_STATE_RTS;
3376                 break;
3377
3378         case IB_QPS_SQD:
3379                 ecore_qp_state =  ECORE_ROCE_QP_STATE_SQD;
3380                 break;
3381
3382         case IB_QPS_ERR:
3383                 ecore_qp_state =  ECORE_ROCE_QP_STATE_ERR;
3384                 break;
3385
3386         default:
3387                 ecore_qp_state =  ECORE_ROCE_QP_STATE_ERR;
3388                 break;
3389         }
3390
3391         return (ecore_qp_state);
3392 }
3393
3394 static void
3395 qlnxr_reset_qp_hwq_info(struct qlnxr_qp_hwq_info *qph)
3396 {
3397         ecore_chain_reset(&qph->pbl);
3398         qph->prod = qph->cons = 0;
3399         qph->wqe_cons = 0;
3400         qph->db_data.data.value = cpu_to_le16(0);
3401
3402         return;
3403 }
3404
3405 static int
3406 qlnxr_update_qp_state(struct qlnxr_dev *dev,
3407         struct qlnxr_qp *qp,
3408         enum ecore_roce_qp_state new_state)
3409 {
3410         int             status = 0;
3411         uint32_t        reg_addr;
3412         struct ecore_dev *cdev;
3413         qlnx_host_t     *ha;
3414
3415         ha = dev->ha;
3416         cdev = &ha->cdev;
3417
3418         QL_DPRINT12(ha, "enter qp = %p new_state = 0x%x qp->state = 0x%x\n",
3419                 qp, new_state, qp->state);
3420
3421         if (new_state == qp->state) {
3422                 return 0;
3423         }
3424
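         /*
          * Summary of the transitions handled by the switch below:
          * RESET->INIT, INIT->{RTR, ERR}, RTR->{RTS, ERR}, RTS->{SQD, ERR},
          * SQD->{RTS, ERR}, and ERR->RESET (the last only if both SQ and RQ
          * are empty). Any other combination yields -EINVAL.
          */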
3425         switch (qp->state) {
3426         case ECORE_ROCE_QP_STATE_RESET:
3427                 switch (new_state) {
3428                 case ECORE_ROCE_QP_STATE_INIT:
3429                         qp->prev_wqe_size = 0;
3430                         qlnxr_reset_qp_hwq_info(&qp->sq);
3431                         if (!(qp->srq))
3432                                 qlnxr_reset_qp_hwq_info(&qp->rq);
3433                         break;
3434                 default:
3435                         status = -EINVAL;
3436                         break;
3437                 };
3438                 break;
3439         case ECORE_ROCE_QP_STATE_INIT:
3440                 /* INIT->XXX */
3441                 switch (new_state) {
3442                 case ECORE_ROCE_QP_STATE_RTR:
3443                 /* Update doorbell (in case post_recv was done before move to RTR) */
3444                         if (qp->srq)
3445                                 break;
3446                         wmb();
3447                         //writel(qp->rq.db_data.raw, qp->rq.db);
3448                         //if (QLNX_IS_IWARP(dev))
3449                         //      writel(qp->rq.iwarp_db2_data.raw,
3450                         //             qp->rq.iwarp_db2);
3451
3452                         reg_addr = (uint32_t)((uint8_t *)qp->rq.db -
3453                                         (uint8_t *)cdev->doorbells);
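                             /*
                              * reg_addr is the byte offset of this QP's RQ
                              * doorbell within the doorbell BAR; e.g.
                              * (illustrative) if qp->rq.db points 0x1000 bytes
                              * past cdev->doorbells, the raw 32-bit doorbell
                              * value is written at offset 0x1000 of the
                              * ha->pci_dbells resource.
                              */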
3454
3455                         bus_write_4(ha->pci_dbells, reg_addr, qp->rq.db_data.raw);
3456                         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
3457
3458                         if (QLNX_IS_IWARP(dev)) {
3459                                 reg_addr = (uint32_t)((uint8_t *)qp->rq.iwarp_db2 -
3460                                         (uint8_t *)cdev->doorbells);
3461                                 bus_write_4(ha->pci_dbells, reg_addr,
3462                                         qp->rq.iwarp_db2_data.raw);
3463                                 bus_barrier(ha->pci_dbells, 0, 0,
3464                                         BUS_SPACE_BARRIER_READ);
3465                         }
3466
3467                         
3468                         mmiowb();
3469                         break;
3470                 case ECORE_ROCE_QP_STATE_ERR:
3471                         /* TBD:flush qps... */
3472                         break;
3473                 default:
3474                         /* invalid state change. */
3475                         status = -EINVAL;
3476                         break;
3477                 };
3478                 break;
3479         case ECORE_ROCE_QP_STATE_RTR:
3480                 /* RTR->XXX */
3481                 switch (new_state) {
3482                 case ECORE_ROCE_QP_STATE_RTS:
3483                         break;
3484                 case ECORE_ROCE_QP_STATE_ERR:
3485                         break;
3486                 default:
3487                         /* invalid state change. */
3488                         status = -EINVAL;
3489                         break;
3490                 };
3491                 break;
3492         case ECORE_ROCE_QP_STATE_RTS:
3493                 /* RTS->XXX */
3494                 switch (new_state) {
3495                 case ECORE_ROCE_QP_STATE_SQD:
3496                         break;
3497                 case ECORE_ROCE_QP_STATE_ERR:
3498                         break;
3499                 default:
3500                         /* invalid state change. */
3501                         status = -EINVAL;
3502                         break;
3503                 };
3504                 break;
3505         case ECORE_ROCE_QP_STATE_SQD:
3506                 /* SQD->XXX */
3507                 switch (new_state) {
3508                 case ECORE_ROCE_QP_STATE_RTS:
3509                 case ECORE_ROCE_QP_STATE_ERR:
3510                         break;
3511                 default:
3512                         /* invalid state change. */
3513                         status = -EINVAL;
3514                         break;
3515                 };
3516                 break;
3517         case ECORE_ROCE_QP_STATE_ERR:
3518                 /* ERR->XXX */
3519                 switch (new_state) {
3520                 case ECORE_ROCE_QP_STATE_RESET:
3521                         if ((qp->rq.prod != qp->rq.cons) ||
3522                             (qp->sq.prod != qp->sq.cons)) {
3523                                 QL_DPRINT11(ha,
3524                                         "Error->Reset with rq/sq "
3525                                         "not empty rq.prod=0x%x rq.cons=0x%x"
3526                                         " sq.prod=0x%x sq.cons=0x%x\n",
3527                                         qp->rq.prod, qp->rq.cons,
3528                                         qp->sq.prod, qp->sq.cons);
3529                                 status = -EINVAL;
3530                         }
3531                         break;
3532                 default:
3533                         status = -EINVAL;
3534                         break;
3535                 };
3536                 break;
3537         default:
3538                 status = -EINVAL;
3539                 break;
3540         };
3541
3542         QL_DPRINT12(ha, "exit\n");
3543         return status;
3544 }
3545
3546 int
3547 qlnxr_modify_qp(struct ib_qp    *ibqp,
3548         struct ib_qp_attr       *attr,
3549         int                     attr_mask,
3550         struct ib_udata         *udata)
3551 {
3552         int rc = 0;
3553         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3554         struct qlnxr_dev *dev = get_qlnxr_dev(&qp->dev->ibdev);
3555         struct ecore_rdma_modify_qp_in_params qp_params = { 0 };
3556         enum ib_qp_state old_qp_state, new_qp_state;
3557         struct ecore_rdma_device *qattr = ecore_rdma_query_device(dev->rdma_ctx);
3558         qlnx_host_t     *ha;
3559
3560         ha = dev->ha;
3561
3562         QL_DPRINT12(ha,
3563                 "enter qp = %p attr_mask = 0x%x, state = %d udata = %p\n",
3564                 qp, attr_mask, attr->qp_state, udata);
3565
3566         old_qp_state = qlnxr_get_ibqp_state(qp->state);
3567         if (attr_mask & IB_QP_STATE)
3568                 new_qp_state = attr->qp_state;
3569         else
3570                 new_qp_state = old_qp_state;
3571
3572         if (QLNX_IS_ROCE(dev)) {
3573 #if __FreeBSD_version >= 1100000
3574                 if (!ib_modify_qp_is_ok(old_qp_state,
3575                                         new_qp_state,
3576                                         ibqp->qp_type,
3577                                         attr_mask,
3578                                         IB_LINK_LAYER_ETHERNET)) {
3579                         QL_DPRINT12(ha,
3580                                 "invalid attribute mask=0x%x"
3581                                 " specified for qpn=0x%x of type=0x%x \n"
3582                                 " old_qp_state=0x%x, new_qp_state=0x%x\n",
3583                                 attr_mask, qp->qp_id, ibqp->qp_type,
3584                                 old_qp_state, new_qp_state);
3585                         rc = -EINVAL;
3586                         goto err;
3587                 }
3588 #else
3589                 if (!ib_modify_qp_is_ok(old_qp_state,
3590                                         new_qp_state,
3591                                         ibqp->qp_type,
3592                                         attr_mask )) {
3593                         QL_DPRINT12(ha,
3594                                 "invalid attribute mask=0x%x"
3595                                 " specified for qpn=0x%x of type=0x%x \n"
3596                                 " old_qp_state=0x%x, new_qp_state=0x%x\n",
3597                                 attr_mask, qp->qp_id, ibqp->qp_type,
3598                                 old_qp_state, new_qp_state);
3599                         rc = -EINVAL;
3600                         goto err;
3601                 }
3602
3603 #endif /* #if __FreeBSD_version >= 1100000 */
3604         }
3605         /* translate the masks... */
3606         if (attr_mask & IB_QP_STATE) {
3607                 SET_FIELD(qp_params.modify_flags,
3608                           ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
3609                 qp_params.new_state = qlnxr_get_state_from_ibqp(attr->qp_state);
3610         }
3611
3612         // TBD consider changing ecore to be a flag as well...
3613         if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
3614                 qp_params.sqd_async = true;
3615
3616         if (attr_mask & IB_QP_PKEY_INDEX) {
3617                 SET_FIELD(qp_params.modify_flags,
3618                           ECORE_ROCE_MODIFY_QP_VALID_PKEY,
3619                           1);
3620                 if (attr->pkey_index >= QLNXR_ROCE_PKEY_TABLE_LEN) {
3621                         rc = -EINVAL;
3622                         goto err;
3623                 }
3624
3625                 qp_params.pkey = QLNXR_ROCE_PKEY_DEFAULT;
3626         }
3627
3628         if (attr_mask & IB_QP_QKEY) {
3629                 qp->qkey = attr->qkey;
3630         }
3631
3632         /* tbd consider splitting in ecore.. */
3633         if (attr_mask & IB_QP_ACCESS_FLAGS) {
3634                 SET_FIELD(qp_params.modify_flags,
3635                           ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
3636                 qp_params.incoming_rdma_read_en =
3637                         attr->qp_access_flags & IB_ACCESS_REMOTE_READ;
3638                 qp_params.incoming_rdma_write_en =
3639                         attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE;
3640                 qp_params.incoming_atomic_en =
3641                         attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC;
3642         }
3643
3644         if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
3645                 if (attr_mask & IB_QP_PATH_MTU) {
3646                         if (attr->path_mtu < IB_MTU_256 ||
3647                             attr->path_mtu > IB_MTU_4096) {
3648
3649                                 QL_DPRINT12(ha,
3650                                         "Only MTU sizes of 256, 512, 1024,"
3651                                         " 2048 and 4096 are supported "
3652                                         " attr->path_mtu = [%d]\n",
3653                                         attr->path_mtu);
3654
3655                                 rc = -EINVAL;
3656                                 goto err;
3657                         }
3658                         qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
3659                                       ib_mtu_enum_to_int(
3660                                                 iboe_get_mtu(dev->ha->ifp->if_mtu)));
3661                 }
3662
3663                 if (qp->mtu == 0) {
3664                         qp->mtu = ib_mtu_enum_to_int(
3665                                         iboe_get_mtu(dev->ha->ifp->if_mtu));
3666                         QL_DPRINT12(ha, "fixing zeroed MTU to qp->mtu = %d\n",
3667                                 qp->mtu);
3668                 }
3669
3670                 SET_FIELD(qp_params.modify_flags,
3671                           ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR,
3672                           1);
3673
3674                 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
3675                 qp_params.flow_label = attr->ah_attr.grh.flow_label;
3676                 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
3677
3678                 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
3679
3680                 get_gid_info(ibqp, attr, attr_mask, dev, qp, &qp_params);
3681
3682                 rc = qlnxr_get_dmac(dev, &attr->ah_attr, qp_params.remote_mac_addr);
3683                 if (rc)
3684                         return rc;
3685
3686                 qp_params.use_local_mac = true;
3687                 memcpy(qp_params.local_mac_addr, dev->ha->primary_mac, ETH_ALEN);
3688
3689                 QL_DPRINT12(ha, "dgid=0x%x:0x%x:0x%x:0x%x\n",
3690                        qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
3691                        qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
3692                 QL_DPRINT12(ha, "sgid=0x%x:0x%x:0x%x:0x%x\n",
3693                        qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
3694                        qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
3695                 QL_DPRINT12(ha,
3696                         "remote_mac=[0x%x:0x%x:0x%x:0x%x:0x%x:0x%x]\n",
3697                         qp_params.remote_mac_addr[0],
3698                         qp_params.remote_mac_addr[1],
3699                         qp_params.remote_mac_addr[2],
3700                         qp_params.remote_mac_addr[3],
3701                         qp_params.remote_mac_addr[4],
3702                         qp_params.remote_mac_addr[5]);
3703
3704                 qp_params.mtu = qp->mtu;
3705         }
3706
3707         if (qp_params.mtu == 0) {
3708                 /* stay with current MTU */
3709                 if (qp->mtu) {
3710                         qp_params.mtu = qp->mtu;
3711                 } else {
3712                         qp_params.mtu = ib_mtu_enum_to_int(
3713                                                 iboe_get_mtu(dev->ha->ifp->if_mtu));
3714                 }
3715         }
3716
3717         if (attr_mask & IB_QP_TIMEOUT) {
3718                 SET_FIELD(qp_params.modify_flags,
3719                         ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
3720
3721                 qp_params.ack_timeout = attr->timeout;
3722                 if (attr->timeout) {
3723                         u32 temp;
3724
3725                         /* 12.7.34 LOCAL ACK TIMEOUT
3726                          * Value representing the transport (ACK) timeout for
3727                          * use by the remote, expressed as
3728                          * (4.096 usec * 2^(Local ACK Timeout)).
3729                          */
3730                         /* We use 1UL since the intermediate value may
3731                          * overflow 32 bits.
3732                          */
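                             /*
                              * Worked example (hypothetical attribute value):
                              * attr->timeout = 14 gives
                              * temp = 4096 * (1UL << 14) = 67,108,864, and the
                              * two divisions by 1000 below reduce that to
                              * ~67 msec, i.e. 4.096 usec * 2^14.
                              */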
3733                         temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
3734                         qp_params.ack_timeout = temp; /* FW requires [msec] */
3735                 }
3736                 else
3737                         qp_params.ack_timeout = 0; /* infinite */
3738         }
3739         if (attr_mask & IB_QP_RETRY_CNT) {
3740                 SET_FIELD(qp_params.modify_flags,
3741                          ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
3742                 qp_params.retry_cnt = attr->retry_cnt;
3743         }
3744
3745         if (attr_mask & IB_QP_RNR_RETRY) {
3746                 SET_FIELD(qp_params.modify_flags,
3747                           ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT,
3748                           1);
3749                 qp_params.rnr_retry_cnt = attr->rnr_retry;
3750         }
3751
3752         if (attr_mask & IB_QP_RQ_PSN) {
3753                 SET_FIELD(qp_params.modify_flags,
3754                           ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN,
3755                           1);
3756                 qp_params.rq_psn = attr->rq_psn;
3757                 qp->rq_psn = attr->rq_psn;
3758         }
3759
3760         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
3761                 if (attr->max_rd_atomic > qattr->max_qp_req_rd_atomic_resc) {
3762                         rc = -EINVAL;
3763                         QL_DPRINT12(ha,
3764                                 "unsupported  max_rd_atomic=%d, supported=%d\n",
3765                                 attr->max_rd_atomic,
3766                                 qattr->max_qp_req_rd_atomic_resc);
3767                         goto err;
3768                 }
3769
3770                 SET_FIELD(qp_params.modify_flags,
3771                           ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ,
3772                           1);
3773                 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
3774         }
3775
3776         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
3777                 SET_FIELD(qp_params.modify_flags,
3778                           ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER,
3779                           1);
3780                 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
3781         }
3782
3783         if (attr_mask & IB_QP_SQ_PSN) {
3784                 SET_FIELD(qp_params.modify_flags,
3785                           ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN,
3786                           1);
3787                 qp_params.sq_psn = attr->sq_psn;
3788                 qp->sq_psn = attr->sq_psn;
3789         }
3790
3791         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
3792                 if (attr->max_dest_rd_atomic >
3793                     qattr->max_qp_resp_rd_atomic_resc) {
3794                         QL_DPRINT12(ha,
3795                                 "unsupported max_dest_rd_atomic=%d, "
3796                                 "supported=%d\n",
3797                                 attr->max_dest_rd_atomic,
3798                                 qattr->max_qp_resp_rd_atomic_resc);
3799
3800                         rc = -EINVAL;
3801                         goto err;
3802                 }
3803
3804                 SET_FIELD(qp_params.modify_flags,
3805                           ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP,
3806                           1);
3807                 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
3808         }
3809
3810         if (attr_mask & IB_QP_DEST_QPN) {
3811                 SET_FIELD(qp_params.modify_flags,
3812                           ECORE_ROCE_MODIFY_QP_VALID_DEST_QP,
3813                           1);
3814
3815                 qp_params.dest_qp = attr->dest_qp_num;
3816                 qp->dest_qp_num = attr->dest_qp_num;
3817         }
3818
3819         /*
3820          * Update the QP state before the actual ramrod to prevent a race with
3821          * the fast path. Modifying the QP state to error causes the device to
3822          * flush the CQEs; polling those flushed CQEs while the QP is not yet
3823          * in the error state would be treated as a potential issue.
3824          */
3825         if ((attr_mask & IB_QP_STATE) && (qp->qp_type != IB_QPT_GSI) &&
3826                 (!udata) && (qp_params.new_state == ECORE_ROCE_QP_STATE_ERR))
3827                 qp->state = ECORE_ROCE_QP_STATE_ERR;
3828
3829         if (qp->qp_type != IB_QPT_GSI)
3830                 rc = ecore_rdma_modify_qp(dev->rdma_ctx, qp->ecore_qp, &qp_params);
3831
3832         if (attr_mask & IB_QP_STATE) {
3833                 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
3834                         rc = qlnxr_update_qp_state(dev, qp, qp_params.new_state);
3835                 qp->state = qp_params.new_state;
3836         }
3837
3838 err:
3839         QL_DPRINT12(ha, "exit\n");
3840         return rc;
3841 }
3842
3843 static int
3844 qlnxr_to_ib_qp_acc_flags(struct ecore_rdma_query_qp_out_params *params)
3845 {
3846         int ib_qp_acc_flags = 0;
3847
3848         if (params->incoming_rdma_write_en)
3849                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
3850         if (params->incoming_rdma_read_en)
3851                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
3852         if (params->incoming_atomic_en)
3853                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
3854         if (true) /* FIXME -> local write ?? */
3855                 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
3856
3857         return ib_qp_acc_flags;
3858 }
3859
3860 static enum ib_mtu
3861 qlnxr_mtu_int_to_enum(u16 mtu)
3862 {
3863         enum ib_mtu ib_mtu_size;
3864
3865         switch (mtu) {
3866         case 256:
3867                 ib_mtu_size = IB_MTU_256;
3868                 break;
3869
3870         case 512:
3871                 ib_mtu_size = IB_MTU_512;
3872                 break;
3873
3874         case 1024:
3875                 ib_mtu_size = IB_MTU_1024;
3876                 break;
3877
3878         case 2048:
3879                 ib_mtu_size = IB_MTU_2048;
3880                 break;
3881
3882         case 4096:
3883                 ib_mtu_size = IB_MTU_4096;
3884                 break;
3885
3886         default:
3887                 ib_mtu_size = IB_MTU_1024;
3888                 break;
3889         }
3890         return (ib_mtu_size);
3891 }
3892
3893 int
3894 qlnxr_query_qp(struct ib_qp *ibqp,
3895         struct ib_qp_attr *qp_attr,
3896         int attr_mask,
3897         struct ib_qp_init_attr *qp_init_attr)
3898 {
3899         int rc = 0;
3900         struct ecore_rdma_query_qp_out_params params;
3901         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3902         struct qlnxr_dev *dev = qp->dev;
3903         qlnx_host_t     *ha;
3904
3905         ha = dev->ha;
3906
3907         QL_DPRINT12(ha, "enter\n");
3908
3909         memset(&params, 0, sizeof(params));
3910
3911         rc = ecore_rdma_query_qp(dev->rdma_ctx, qp->ecore_qp, &params);
3912         if (rc)
3913                 goto err;
3914
3915         memset(qp_attr, 0, sizeof(*qp_attr));
3916         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3917
3918         qp_attr->qp_state = qlnxr_get_ibqp_state(params.state);
3919         qp_attr->cur_qp_state = qlnxr_get_ibqp_state(params.state);
3920
3921         /* In some cases in iWARP qelr will ask for the state only */
3922         if (QLNX_IS_IWARP(dev) && (attr_mask == IB_QP_STATE)) {
3923                 QL_DPRINT11(ha, "only state requested\n");
3924                 return 0;
3925         }
3926
3927         qp_attr->path_mtu = qlnxr_mtu_int_to_enum(params.mtu);
3928         qp_attr->path_mig_state = IB_MIG_MIGRATED;
3929         qp_attr->rq_psn = params.rq_psn;
3930         qp_attr->sq_psn = params.sq_psn;
3931         qp_attr->dest_qp_num = params.dest_qp;
3932
3933         qp_attr->qp_access_flags = qlnxr_to_ib_qp_acc_flags(&params);
3934
3935         QL_DPRINT12(ha, "qp_state = 0x%x cur_qp_state = 0x%x "
3936                 "path_mtu = %d qp_access_flags = 0x%x\n",
3937                 qp_attr->qp_state, qp_attr->cur_qp_state, qp_attr->path_mtu,
3938                 qp_attr->qp_access_flags);
3939
3940         qp_attr->cap.max_send_wr = qp->sq.max_wr;
3941         qp_attr->cap.max_recv_wr = qp->rq.max_wr;
3942         qp_attr->cap.max_send_sge = qp->sq.max_sges;
3943         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
3944         qp_attr->cap.max_inline_data = qp->max_inline_data;
3945         qp_init_attr->cap = qp_attr->cap;
3946
3947         memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
3948                sizeof(qp_attr->ah_attr.grh.dgid.raw));
3949
3950         qp_attr->ah_attr.grh.flow_label = params.flow_label;
3951         qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
3952         qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
3953         qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
3954
3955         qp_attr->ah_attr.ah_flags = IB_AH_GRH;
3956         qp_attr->ah_attr.port_num = 1; /* FIXME -> check this */
3957         qp_attr->ah_attr.sl = 0;/* FIXME -> check this */
3958         qp_attr->timeout = params.timeout;
3959         qp_attr->rnr_retry = params.rnr_retry;
3960         qp_attr->retry_cnt = params.retry_cnt;
3961         qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
3962         qp_attr->pkey_index = params.pkey_index;
3963         qp_attr->port_num = 1; /* FIXME -> check this */
3964         qp_attr->ah_attr.src_path_bits = 0;
3965         qp_attr->ah_attr.static_rate = 0;
3966         qp_attr->alt_pkey_index = 0;
3967         qp_attr->alt_port_num = 0;
3968         qp_attr->alt_timeout = 0;
3969         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
3970
3971         qp_attr->sq_draining = (params.state == ECORE_ROCE_QP_STATE_SQD) ? 1 : 0;
3972         qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
3973         qp_attr->max_rd_atomic = params.max_rd_atomic;
3974         qp_attr->en_sqd_async_notify = (params.sqd_async)? 1 : 0;
3975
3976         QL_DPRINT12(ha, "max_inline_data=%d\n",
3977                 qp_attr->cap.max_inline_data);
3978
3979 err:
3980         QL_DPRINT12(ha, "exit\n");
3981         return rc;
3982 }
3983
3984
3985 static void
3986 qlnxr_cleanup_user(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
3987 {
3988         qlnx_host_t     *ha;
3989
3990         ha = dev->ha;
3991
3992         QL_DPRINT12(ha, "enter\n");
3993  
3994         if (qp->usq.umem)
3995                 ib_umem_release(qp->usq.umem);
3996
3997         qp->usq.umem = NULL;
3998
3999         if (qp->urq.umem)
4000                 ib_umem_release(qp->urq.umem);
4001
4002         qp->urq.umem = NULL;
4003
4004         QL_DPRINT12(ha, "exit\n");
4005         return;
4006 }
4007
4008 static void
4009 qlnxr_cleanup_kernel(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
4010 {
4011         qlnx_host_t     *ha;
4012
4013         ha = dev->ha;
4014
4015         QL_DPRINT12(ha, "enter\n");
4016  
4017         if (qlnxr_qp_has_sq(qp)) {
4018                 QL_DPRINT12(ha, "freeing SQ\n");
4019                 ha->qlnxr_debug = 1;
4020 //              ecore_chain_free(dev->cdev, &qp->sq.pbl);
4021                 ha->qlnxr_debug = 0;
4022                 kfree(qp->wqe_wr_id);
4023         }
4024
4025         if (qlnxr_qp_has_rq(qp)) {
4026                 QL_DPRINT12(ha, "freeing RQ\n");
4027                 ha->qlnxr_debug = 1;
4028         //      ecore_chain_free(dev->cdev, &qp->rq.pbl);
4029                 ha->qlnxr_debug = 0;
4030                 kfree(qp->rqe_wr_id);
4031         }
4032
4033         QL_DPRINT12(ha, "exit\n");
4034         return;
4035 }
4036
4037 int
4038 qlnxr_free_qp_resources(struct qlnxr_dev *dev,
4039         struct qlnxr_qp *qp)
4040 {
4041         int             rc = 0;
4042         qlnx_host_t     *ha;
4043         struct ecore_rdma_destroy_qp_out_params d_out_params;
4044
4045         ha = dev->ha;
4046
4047         QL_DPRINT12(ha, "enter\n");
4048  
4049 #if 0
4050         if (qp->qp_type != IB_QPT_GSI) {
4051                 rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp,
4052                                 &d_out_params);
4053                 if (rc)
4054                         return rc;
4055         }
4056
4057         if (qp->ibqp.uobject && qp->ibqp.uobject->context)
4058                 qlnxr_cleanup_user(dev, qp);
4059         else
4060                 qlnxr_cleanup_kernel(dev, qp);
4061 #endif
4062
4063         if (qp->ibqp.uobject && qp->ibqp.uobject->context)
4064                 qlnxr_cleanup_user(dev, qp);
4065         else
4066                 qlnxr_cleanup_kernel(dev, qp);
4067
4068         if (qp->qp_type != IB_QPT_GSI) {
4069                 rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp,
4070                                 &d_out_params);
4071                 if (rc)
4072                         return rc;
4073         }
4074
4075         QL_DPRINT12(ha, "exit\n");
4076         return 0;
4077 }
4078
4079 int
4080 qlnxr_destroy_qp(struct ib_qp *ibqp)
4081 {
4082         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
4083         struct qlnxr_dev *dev = qp->dev;
4084         int rc = 0;
4085         struct ib_qp_attr attr;
4086         int attr_mask = 0;
4087         qlnx_host_t     *ha;
4088
4089         ha = dev->ha;
4090
4091         QL_DPRINT12(ha, "enter qp = %p, qp_type=%d\n", qp, qp->qp_type);
4092
4093         qp->destroyed = 1;
4094
4095         if (QLNX_IS_ROCE(dev) && (qp->state != ECORE_ROCE_QP_STATE_RESET) &&
4096             (qp->state != ECORE_ROCE_QP_STATE_ERR) &&
4097             (qp->state != ECORE_ROCE_QP_STATE_INIT)) {
4098
4099                 attr.qp_state = IB_QPS_ERR;
4100                 attr_mask |= IB_QP_STATE;
4101
4102                 /* change the QP state to ERROR */
4103                 qlnxr_modify_qp(ibqp, &attr, attr_mask, NULL);
4104         }
4105
4106         if (qp->qp_type == IB_QPT_GSI)
4107                 qlnxr_destroy_gsi_qp(dev);
4108
4109         qp->sig = ~qp->sig;
4110
4111         qlnxr_free_qp_resources(dev, qp);
4112
4113         if (atomic_dec_and_test(&qp->refcnt)) {
4114                 /* TODO: only for iWARP? */
4115                 qlnxr_idr_remove(dev, qp->qp_id);
4116                 kfree(qp);
4117         }
4118
4119         QL_DPRINT12(ha, "exit\n");
4120         return rc;
4121 }
4122
4123 static inline int
4124 qlnxr_wq_is_full(struct qlnxr_qp_hwq_info *wq)
4125 {
4126         return (((wq->prod + 1) % wq->max_wr) == wq->cons);
4127 }
4128
4129 static int
4130 sge_data_len(struct ib_sge *sg_list, int num_sge)
4131 {
4132         int i, len = 0;
4133         for (i = 0; i < num_sge; i++)
4134                 len += sg_list[i].length;
4135         return len;
4136 }
4137
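     /*
      * Descriptive note: cpu_to_le64() is a no-op on little-endian hosts and a
      * byte swap on big-endian ones, while cpu_to_be64() behaves the opposite
      * way, so the composition below is an unconditional 64-bit byte swap of
      * each WQE word, presumably to match the byte order the firmware expects
      * for inline payloads.
      */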
4138 static void
4139 swap_wqe_data64(u64 *p)
4140 {
4141         int i;
4142
4143         for (i = 0; i < QLNXR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
4144                 *p = cpu_to_be64(cpu_to_le64(*p));
4145 }
4146
4147
4148 static u32
4149 qlnxr_prepare_sq_inline_data(struct qlnxr_dev *dev,
4150         struct qlnxr_qp         *qp,
4151         u8                      *wqe_size,
4152         struct ib_send_wr       *wr,
4153         struct ib_send_wr       **bad_wr,
4154         u8                      *bits,
4155         u8                      bit)
4156 {
4157         int i, seg_siz;
4158         char *seg_prt, *wqe;
4159         u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
4160         qlnx_host_t     *ha;
4161
4162         ha = dev->ha;
4163
4164         QL_DPRINT12(ha, "enter[%d]\n", data_size);
4165
4166         if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
4167                 QL_DPRINT12(ha,
4168                         "Too much inline data in WR:[%d, %d]\n",
4169                         data_size, ROCE_REQ_MAX_INLINE_DATA_SIZE);
4170                 *bad_wr = wr;
4171                 return 0;
4172         }
4173
4174         if (!data_size)
4175                 return data_size;
4176
4177         /* set the bit */
4178         *bits |= bit;
4179
4180         seg_prt = wqe = NULL;
4181         seg_siz = 0;
4182
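         /*
          * The inline payload is packed into successive SQ chain elements:
          * each ecore_chain_produce() below yields one element of
          * sizeof(struct rdma_sq_common_wqe) bytes and bumps *wqe_size.
          * Illustrative example (assuming 16-byte elements): 40 bytes of
          * inline data span 3 elements, the last one only partially used.
          */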
4183         /* copy data inline */
4184         for (i = 0; i < wr->num_sge; i++) {
4185                 u32 len = wr->sg_list[i].length;
4186                 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
4187
4188                 while (len > 0) {
4189                         u32 cur;
4190
4191                         /* new segment required */
4192                         if (!seg_siz) {
4193                                 wqe = (char *)ecore_chain_produce(&qp->sq.pbl);
4194                                 seg_prt = wqe;
4195                                 seg_siz = sizeof(struct rdma_sq_common_wqe);
4196                                 (*wqe_size)++;
4197                         }
4198
4199                         /* calculate currently allowed length */
4200                         cur = MIN(len, seg_siz);
4201
4202                         memcpy(seg_prt, src, cur);
4203
4204                         /* update segment variables */
4205                         seg_prt += cur;
4206                         seg_siz -= cur;
4207                         /* update sge variables */
4208                         src += cur;
4209                         len -= cur;
4210
4211                         /* swap fully-completed segments */
4212                         if (!seg_siz)
4213                                 swap_wqe_data64((u64 *)wqe);
4214                 }
4215         }
4216
4217         /* swap last not completed segment */
4218         if (seg_siz)
4219                 swap_wqe_data64((u64 *)wqe);
4220
4221         QL_DPRINT12(ha, "exit\n");
4222         return data_size;
4223 }
4224
4225 static u32
4226 qlnxr_prepare_sq_sges(struct qlnxr_dev *dev, struct qlnxr_qp *qp,
4227         u8 *wqe_size, struct ib_send_wr *wr)
4228 {
4229         int i;
4230         u32 data_size = 0;
4231         qlnx_host_t     *ha;
4232
4233         ha = dev->ha;
4234
4235         QL_DPRINT12(ha, "enter wr->num_sge = %d \n", wr->num_sge);
4236  
4237         for (i = 0; i < wr->num_sge; i++) {
4238                 struct rdma_sq_sge *sge = ecore_chain_produce(&qp->sq.pbl);
4239
4240                 TYPEPTR_ADDR_SET(sge, addr, wr->sg_list[i].addr);
4241                 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
4242                 sge->length = cpu_to_le32(wr->sg_list[i].length);
4243                 data_size += wr->sg_list[i].length;
4244         }
4245
4246         if (wqe_size)
4247                 *wqe_size += wr->num_sge;
4248
4249         QL_DPRINT12(ha, "exit data_size = %d\n", data_size);
4250         return data_size;
4251 }
4252
4253 static u32
4254 qlnxr_prepare_sq_rdma_data(struct qlnxr_dev *dev,
4255         struct qlnxr_qp *qp,
4256         struct rdma_sq_rdma_wqe_1st *rwqe,
4257         struct rdma_sq_rdma_wqe_2nd *rwqe2,
4258         struct ib_send_wr *wr,
4259         struct ib_send_wr **bad_wr)
4260 {
4261         qlnx_host_t     *ha;
4262         u32             ret = 0;
4263
4264         ha = dev->ha;
4265
4266         QL_DPRINT12(ha, "enter\n");
4267  
4268         rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
4269         TYPEPTR_ADDR_SET(rwqe2, remote_va, rdma_wr(wr)->remote_addr);
4270
4271         if (wr->send_flags & IB_SEND_INLINE) {
4272                 u8 flags = 0;
4273                 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
4274                 return qlnxr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size,
4275                                 wr, bad_wr, &rwqe->flags, flags);
4276         }
4277
4278         ret = qlnxr_prepare_sq_sges(dev, qp, &rwqe->wqe_size, wr);
4279
4280         QL_DPRINT12(ha, "exit ret = 0x%x\n", ret);
4281
4282         return (ret);
4283 }
4284
4285 static u32
4286 qlnxr_prepare_sq_send_data(struct qlnxr_dev *dev,
4287         struct qlnxr_qp *qp,
4288         struct rdma_sq_send_wqe *swqe,
4289         struct rdma_sq_send_wqe *swqe2,
4290         struct ib_send_wr *wr,
4291         struct ib_send_wr **bad_wr)
4292 {
4293         qlnx_host_t     *ha;
4294         u32             ret = 0;
4295
4296         ha = dev->ha;
4297
4298         QL_DPRINT12(ha, "enter\n");
4299  
4300         memset(swqe2, 0, sizeof(*swqe2));
4301
4302         if (wr->send_flags & IB_SEND_INLINE) {
4303                 u8 flags = 0;
4304                 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
4305                 return qlnxr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size,
4306                                 wr, bad_wr, &swqe->flags, flags);
4307         }
4308
4309         ret = qlnxr_prepare_sq_sges(dev, qp, &swqe->wqe_size, wr);
4310
4311         QL_DPRINT12(ha, "exit ret = 0x%x\n", ret);
4312
4313         return (ret);
4314 }
4315
4316 static void
4317 qlnx_handle_completed_mrs(struct qlnxr_dev *dev, struct mr_info *info)
4318 {
4319         qlnx_host_t     *ha;
4320         int             work;
4321
4322         ha = dev->ha;
4323         work = info->completed - info->completed_handled - 1;
4324
4325         QL_DPRINT12(ha, "enter [%d]\n", work);
4326  
4327         while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
4328                 struct qlnxr_pbl *pbl;
4329
4330                 /* Free all the page list that are possible to be freed
4331                  * (all the ones that were invalidated), under the assumption
4332                  * that if an FMR was completed successfully that means that
4333                  * if there was an invalidate operation before it also ended
4334                  */
4335                 pbl = list_first_entry(&info->inuse_pbl_list,
4336                                        struct qlnxr_pbl,
4337                                        list_entry);
4338                 list_del(&pbl->list_entry);
4339                 list_add_tail(&pbl->list_entry, &info->free_pbl_list);
4340                 info->completed_handled++;
4341         }
4342
4343         QL_DPRINT12(ha, "exit\n");
4344         return;
4345 }
4346
4347 #if __FreeBSD_version >= 1102000
4348
4349 static int qlnxr_prepare_reg(struct qlnxr_qp *qp,
4350                 struct rdma_sq_fmr_wqe_1st *fwqe1,
4351                 struct ib_reg_wr *wr)
4352 {
4353         struct qlnxr_mr *mr = get_qlnxr_mr(wr->mr);
4354         struct rdma_sq_fmr_wqe_2nd *fwqe2;
4355
4356         fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)ecore_chain_produce(&qp->sq.pbl);
4357         fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
4358         fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
4359         fwqe1->l_key = wr->key;
4360
4361         fwqe2->access_ctrl = 0;
4362
4363         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
4364                 !!(wr->access & IB_ACCESS_REMOTE_READ));
4365         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
4366                 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
4367         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
4368                 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
4369         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
4370         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
4371                 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
4372         fwqe2->fmr_ctrl = 0;
4373
4374         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
4375                 ilog2(mr->ibmr.page_size) - 12);
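         /*
          * The PAGE_SIZE_LOG field set above is the MR page size as a power of
          * two relative to 4 KiB; e.g. a 4 KiB page size gives
          * ilog2(4096) - 12 = 0, while a 2 MiB page size gives 21 - 12 = 9
          * (illustrative values for mr->ibmr.page_size).
          */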
4376
4377         fwqe2->length_hi = 0; /* TODO - figure out why length is only 32bit.. */
4378         fwqe2->length_lo = mr->ibmr.length;
4379         fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
4380         fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
4381
4382         qp->wqe_wr_id[qp->sq.prod].mr = mr;
4383
4384         return 0;
4385 }
4386
4387 #else
4388
4389 static void
4390 build_frmr_pbes(struct qlnxr_dev *dev, struct ib_send_wr *wr,
4391         struct mr_info *info)
4392 {
4393         int i;
4394         u64 buf_addr = 0;
4395         int num_pbes, total_num_pbes = 0;
4396         struct regpair *pbe;
4397         struct qlnxr_pbl *pbl_tbl = info->pbl_table;
4398         struct qlnxr_pbl_info *pbl_info = &info->pbl_info;
4399         qlnx_host_t     *ha;
4400
4401         ha = dev->ha;
4402
4403         QL_DPRINT12(ha, "enter\n");
4404  
4405         pbe = (struct regpair *)pbl_tbl->va;
4406         num_pbes = 0;
4407
4408         for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
4409                 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
4410                 pbe->lo = cpu_to_le32((u32)buf_addr);
4411                 pbe->hi = cpu_to_le32((u32)upper_32_bits(buf_addr));
4412
4413                 num_pbes += 1;
4414                 pbe++;
4415                 total_num_pbes++;
4416
4417                 if (total_num_pbes == pbl_info->num_pbes)
4418                         return;
4419
4420                 /* if the given pbl is full storing the pbes,
4421                  * move to next pbl.
4422                  */
4423                 if (num_pbes ==
4424                     (pbl_info->pbl_size / sizeof(u64))) {
4425                         pbl_tbl++;
4426                         pbe = (struct regpair *)pbl_tbl->va;
4427                         num_pbes = 0;
4428                 }
4429         }
4430         QL_DPRINT12(ha, "exit\n");
4431
4432         return;
4433 }
4434
4435 static int
4436 qlnxr_prepare_safe_pbl(struct qlnxr_dev *dev, struct mr_info *info)
4437 {
4438         int rc = 0;
4439         qlnx_host_t     *ha;
4440
4441         ha = dev->ha;
4442
4443         QL_DPRINT12(ha, "enter\n");
4444  
4445         if (info->completed == 0) {
4446                 //DP_VERBOSE(dev, QLNXR_MSG_MR, "First FMR\n");
4447                 /* first fmr */
4448                 return 0;
4449         }
4450
4451         qlnx_handle_completed_mrs(dev, info);
4452
4453         list_add_tail(&info->pbl_table->list_entry, &info->inuse_pbl_list);
4454
4455         if (list_empty(&info->free_pbl_list)) {
4456                 info->pbl_table = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info,
4457                                                           GFP_ATOMIC);
4458         } else {
4459                 info->pbl_table = list_first_entry(&info->free_pbl_list,
4460                                         struct qlnxr_pbl,
4461                                         list_entry);
4462                 list_del(&info->pbl_table->list_entry);
4463         }
4464
4465         if (!info->pbl_table)
4466                 rc = -ENOMEM;
4467
4468         QL_DPRINT12(ha, "exit\n");
4469         return rc;
4470 }
4471
4472 static inline int
4473 qlnxr_prepare_fmr(struct qlnxr_qp *qp,
4474         struct rdma_sq_fmr_wqe_1st *fwqe1,
4475         struct ib_send_wr *wr)
4476 {
4477         struct qlnxr_dev *dev = qp->dev;
4478         u64 fbo;
4479         struct qlnxr_fast_reg_page_list *frmr_list =
4480                 get_qlnxr_frmr_list(wr->wr.fast_reg.page_list);
4481         struct rdma_sq_fmr_wqe *fwqe2 =
4482                 (struct rdma_sq_fmr_wqe *)ecore_chain_produce(&qp->sq.pbl);
4483         int rc = 0;
4484         qlnx_host_t     *ha;
4485
4486         ha = dev->ha;
4487
4488         QL_DPRINT12(ha, "enter\n");
4489  
4490         if (wr->wr.fast_reg.page_list_len == 0)
4491                 BUG();
4492
4493         rc = qlnxr_prepare_safe_pbl(dev, &frmr_list->info);
4494         if (rc)
4495                 return rc;
4496
4497         fwqe1->addr.hi = upper_32_bits(wr->wr.fast_reg.iova_start);
4498         fwqe1->addr.lo = lower_32_bits(wr->wr.fast_reg.iova_start);
4499         fwqe1->l_key = wr->wr.fast_reg.rkey;
4500
4501         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_REMOTE_READ,
4502                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ));
4503         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_REMOTE_WRITE,
4504                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE));
4505         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_ENABLE_ATOMIC,
4506                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_ATOMIC));
4507         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_LOCAL_READ, 1);
4508         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_LOCAL_WRITE,
4509                    !!(wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE));
4510
4511         fwqe2->fmr_ctrl = 0;
4512
4513         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
4514                    ilog2(1 << wr->wr.fast_reg.page_shift) - 12);
4515         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_ZERO_BASED, 0);
4516
4517         fwqe2->length_hi = 0; /* Todo - figure this out... why length is only 32bit.. */
4518         fwqe2->length_lo = wr->wr.fast_reg.length;
4519         fwqe2->pbl_addr.hi = upper_32_bits(frmr_list->info.pbl_table->pa);
4520         fwqe2->pbl_addr.lo = lower_32_bits(frmr_list->info.pbl_table->pa);
4521
4522         /* produce another wqe for fwqe3 */
4523         ecore_chain_produce(&qp->sq.pbl);
4524
4525         fbo = wr->wr.fast_reg.iova_start -
4526             (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
4527
4528         QL_DPRINT12(ha, "wr.fast_reg.iova_start = %p rkey=%x addr=%x:%x"
4529                 " length = %x pbl_addr %x:%x\n",
4530                 wr->wr.fast_reg.iova_start, wr->wr.fast_reg.rkey,
4531                 fwqe1->addr.hi, fwqe1->addr.lo, fwqe2->length_lo,
4532                 fwqe2->pbl_addr.hi, fwqe2->pbl_addr.lo);
4533
4534         build_frmr_pbes(dev, wr, &frmr_list->info);
4535
4536         qp->wqe_wr_id[qp->sq.prod].frmr = frmr_list;
4537
4538         QL_DPRINT12(ha, "exit\n");
4539         return 0;
4540 }
4541
4542 #endif /* #if __FreeBSD_version >= 1102000 */
4543
4544 static enum ib_wc_opcode
4545 qlnxr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
4546 {
4547         switch (opcode) {
4548         case IB_WR_RDMA_WRITE:
4549         case IB_WR_RDMA_WRITE_WITH_IMM:
4550                 return IB_WC_RDMA_WRITE;
4551         case IB_WR_SEND_WITH_IMM:
4552         case IB_WR_SEND:
4553         case IB_WR_SEND_WITH_INV:
4554                 return IB_WC_SEND;
4555         case IB_WR_RDMA_READ:
4556                 return IB_WC_RDMA_READ;
4557         case IB_WR_ATOMIC_CMP_AND_SWP:
4558                 return IB_WC_COMP_SWAP;
4559         case IB_WR_ATOMIC_FETCH_AND_ADD:
4560                 return IB_WC_FETCH_ADD;
4561
4562 #if __FreeBSD_version >= 1102000
4563         case IB_WR_REG_MR:
4564                 return IB_WC_REG_MR;
4565 #else
4566         case IB_WR_FAST_REG_MR:
4567                 return IB_WC_FAST_REG_MR;
4568 #endif /* #if __FreeBSD_version >= 1102000 */
4569
4570         case IB_WR_LOCAL_INV:
4571                 return IB_WC_LOCAL_INV;
4572         default:
4573                 return IB_WC_SEND;
4574         }
4575 }
4576 static inline bool
4577 qlnxr_can_post_send(struct qlnxr_qp *qp, struct ib_send_wr *wr)
4578 {
4579         int wq_is_full, err_wr, pbl_is_full;
4580         struct qlnxr_dev *dev = qp->dev;
4581         qlnx_host_t     *ha;
4582
4583         ha = dev->ha;
4584
4585         QL_DPRINT12(ha, "enter[qp, wr] = [%p,%p]\n", qp, wr);
4586  
4587         /* prevent SQ overflow and/or processing of a bad WR */
4588         err_wr = wr->num_sge > qp->sq.max_sges;
4589         wq_is_full = qlnxr_wq_is_full(&qp->sq);
4590         pbl_is_full = ecore_chain_get_elem_left_u32(&qp->sq.pbl) <
4591                       QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
4592         if (wq_is_full || err_wr || pbl_is_full) {
4593                 if (wq_is_full &&
4594                     !(qp->err_bitmap & QLNXR_QP_ERR_SQ_FULL)) {
4595
4596                         qp->err_bitmap |= QLNXR_QP_ERR_SQ_FULL;
4597
4598                         QL_DPRINT12(ha,
4599                                 "error: WQ is full. Post send on QP failed"
4600                                 " (this error appears only once) "
4601                                 "[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4602                                 qp, wr, qp->err_bitmap);
4603                 }
4604
4605                 if (err_wr &&
4606                     !(qp->err_bitmap & QLNXR_QP_ERR_BAD_SR)) {
4607
4608                         qp->err_bitmap |= QLNXR_QP_ERR_BAD_SR;
4609
4610                         QL_DPRINT12(ha,
4611                                 "error: WQ is bad. Post send on QP failed"
4612                                 " (this error appears only once) "
4613                                 "[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4614                                 qp, wr, qp->err_bitmap);
4615                 }
4616
4617                 if (pbl_is_full &&
4618                     !(qp->err_bitmap & QLNXR_QP_ERR_SQ_PBL_FULL)) {
4619
4620                         qp->err_bitmap |= QLNXR_QP_ERR_SQ_PBL_FULL;
4621
4622                         QL_DPRINT12(ha,
4623                                 "error: WQ PBL is full. Post send on QP failed"
4624                                 " (this error appears only once) "
4625                                 "[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4626                                 qp, wr, qp->err_bitmap);
4627                 }
4628                 return false;
4629         }
4630         QL_DPRINT12(ha, "exit[qp, wr] = [%p,%p]\n", qp, wr);
4631         return true;
4632 }
4633
4634 int
4635 qlnxr_post_send(struct ib_qp *ibqp,
4636         struct ib_send_wr *wr,
4637         struct ib_send_wr **bad_wr)
4638 {
4639         struct qlnxr_dev        *dev = get_qlnxr_dev(ibqp->device);
4640         struct qlnxr_qp         *qp = get_qlnxr_qp(ibqp);
4641         unsigned long           flags;
4642         int                     status = 0, rc = 0;
4643         bool                    comp;
4644         qlnx_host_t             *ha;
4645         uint32_t                reg_addr;
4646  
4647         *bad_wr = NULL;
4648         ha = dev->ha;
4649
4650         QL_DPRINT12(ha, "enter[ibqp, wr, bad_wr] = [%p, %p, %p]\n",
4651                 ibqp, wr, bad_wr);
4652
4653         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
4654                 return -EINVAL;
4655
4656         if (qp->qp_type == IB_QPT_GSI)
4657                 return qlnxr_gsi_post_send(ibqp, wr, bad_wr);
4658
4659         spin_lock_irqsave(&qp->q_lock, flags);
4660
4661         if (QLNX_IS_ROCE(dev) && (qp->state != ECORE_ROCE_QP_STATE_RTS) &&
4662             (qp->state != ECORE_ROCE_QP_STATE_ERR) &&
4663             (qp->state != ECORE_ROCE_QP_STATE_SQD)) {
4664                 spin_unlock_irqrestore(&qp->q_lock, flags);
4665                 *bad_wr = wr;
4666                 QL_DPRINT11(ha, "QP in wrong state! QP icid=0x%x state %d\n",
4667                         qp->icid, qp->state);
4668                 return -EINVAL;
4669         }
4670
4671         if (!wr) {
4672                 QL_DPRINT11(ha, "Got an empty post send???\n");
4673         }
4674
4675         while (wr) {
4676                 struct rdma_sq_common_wqe       *wqe;
4677                 struct rdma_sq_send_wqe         *swqe;
4678                 struct rdma_sq_send_wqe         *swqe2;
4679                 struct rdma_sq_rdma_wqe_1st     *rwqe;
4680                 struct rdma_sq_rdma_wqe_2nd     *rwqe2;
4681                 struct rdma_sq_local_inv_wqe    *iwqe;
4682                 struct rdma_sq_atomic_wqe       *awqe1;
4683                 struct rdma_sq_atomic_wqe       *awqe2;
4684                 struct rdma_sq_atomic_wqe       *awqe3;
4685                 struct rdma_sq_fmr_wqe_1st      *fwqe1;
4686
4687                 if (!qlnxr_can_post_send(qp, wr)) {
4688                         status = -ENOMEM;
4689                         *bad_wr = wr;
4690                         break;
4691                 }
4692
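                /* Produce the first chain element for this WQE; the opcode-specific
                 * cases below may produce additional elements as needed.
                 */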
4693                 wqe = ecore_chain_produce(&qp->sq.pbl);
4694
4695                 qp->wqe_wr_id[qp->sq.prod].signaled =
4696                         !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
4697
4698                 /* common fields */
4699                 wqe->flags = 0;
4700                 wqe->flags |= (RDMA_SQ_SEND_WQE_COMP_FLG_MASK <<
4701                                 RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT);
4702
4703                 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG, \
4704                         !!(wr->send_flags & IB_SEND_SOLICITED));
4705
4706                 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) ||
4707                                 (qp->signaled);
4708
4709                 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
4710                 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,  \
4711                         !!(wr->send_flags & IB_SEND_FENCE));
4712
4713                 wqe->prev_wqe_size = qp->prev_wqe_size;
4714
4715                 qp->wqe_wr_id[qp->sq.prod].opcode = qlnxr_ib_to_wc_opcode(wr->opcode);
4716
4717
4718                 switch (wr->opcode) {
4719
4720                 case IB_WR_SEND_WITH_IMM:
4721
4722                         wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
4723                         swqe = (struct rdma_sq_send_wqe *)wqe;
4724                         swqe->wqe_size = 2;
4725                         swqe2 = (struct rdma_sq_send_wqe *)
4726                                         ecore_chain_produce(&qp->sq.pbl);
4727                         swqe->inv_key_or_imm_data =
4728                                 cpu_to_le32(wr->ex.imm_data);
4729                         swqe->length = cpu_to_le32(
4730                                                 qlnxr_prepare_sq_send_data(dev,
4731                                                         qp, swqe, swqe2, wr,
4732                                                         bad_wr));
4733
4734                         qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4735                         qp->prev_wqe_size = swqe->wqe_size;
4736                         qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4737
4738                         QL_DPRINT12(ha, "SEND w/ IMM length = %d imm data=%x\n",
4739                                 swqe->length, wr->ex.imm_data);
4740
4741                         break;
4742
4743                 case IB_WR_SEND:
4744
4745                         wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
4746                         swqe = (struct rdma_sq_send_wqe *)wqe;
4747
4748                         swqe->wqe_size = 2;
4749                         swqe2 = (struct rdma_sq_send_wqe *)
4750                                         ecore_chain_produce(&qp->sq.pbl);
4751                         swqe->length = cpu_to_le32(
4752                                                 qlnxr_prepare_sq_send_data(dev,
4753                                                         qp, swqe, swqe2, wr,
4754                                                         bad_wr));
4755                         qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4756                         qp->prev_wqe_size = swqe->wqe_size;
4757                         qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4758
4759                         QL_DPRINT12(ha, "SEND w/o IMM length = %d\n",
4760                                 swqe->length);
4761
4762                         break;
4763
4764                 case IB_WR_SEND_WITH_INV:
4765
4766                         wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
4767                         swqe = (struct rdma_sq_send_wqe *)wqe;
4768                         swqe2 = (struct rdma_sq_send_wqe *)
4769                                         ecore_chain_produce(&qp->sq.pbl);
4770                         swqe->wqe_size = 2;
4771                         swqe->inv_key_or_imm_data =
4772                                 cpu_to_le32(wr->ex.invalidate_rkey);
4773                         swqe->length = cpu_to_le32(qlnxr_prepare_sq_send_data(dev,
4774                                                 qp, swqe, swqe2, wr, bad_wr));
4775                         qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4776                         qp->prev_wqe_size = swqe->wqe_size;
4777                         qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4778
4779                         QL_DPRINT12(ha, "SEND w INVALIDATE length = %d\n",
4780                                 swqe->length);
4781                         break;
4782
4783                 case IB_WR_RDMA_WRITE_WITH_IMM:
4784
4785                         wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
4786                         rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4787
4788                         rwqe->wqe_size = 2;
4789                         rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
4790                         rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4791                                         ecore_chain_produce(&qp->sq.pbl);
4792                         rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4793                                                 qp, rwqe, rwqe2, wr, bad_wr));
4794                         qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4795                         qp->prev_wqe_size = rwqe->wqe_size;
4796                         qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4797
4798                         QL_DPRINT12(ha,
4799                                 "RDMA WRITE w/ IMM length = %d imm data=%x\n",
4800                                 rwqe->length, rwqe->imm_data);
4801
4802                         break;
4803
4804                 case IB_WR_RDMA_WRITE:
4805
4806                         wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
4807                         rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4808
4809                         rwqe->wqe_size = 2;
4810                         rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4811                                         ecore_chain_produce(&qp->sq.pbl);
4812                         rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4813                                                 qp, rwqe, rwqe2, wr, bad_wr));
4814                         qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4815                         qp->prev_wqe_size = rwqe->wqe_size;
4816                         qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4817
4818                         QL_DPRINT12(ha,
4819                                 "RDMA WRITE w/o IMM length = %d\n",
4820                                 rwqe->length);
4821
4822                         break;
4823
4824                 case IB_WR_RDMA_READ_WITH_INV:
4825
4826                         QL_DPRINT12(ha,
4827                                 "RDMA READ WITH INVALIDATE not supported\n");
4828
4829                         *bad_wr = wr;
4830                         rc = -EINVAL;
4831
4832                         break;
4833
4834                 case IB_WR_RDMA_READ:
4835
4836                         wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
4837                         rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4838
4839                         rwqe->wqe_size = 2;
4840                         rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4841                                         ecore_chain_produce(&qp->sq.pbl);
4842                         rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4843                                                 qp, rwqe, rwqe2, wr, bad_wr));
4844
4845                         qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4846                         qp->prev_wqe_size = rwqe->wqe_size;
4847                         qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4848
4849                         QL_DPRINT12(ha, "RDMA READ length = %d\n",
4850                                 rwqe->length);
4851
4852                         break;
4853
4854                 case IB_WR_ATOMIC_CMP_AND_SWP:
4855                 case IB_WR_ATOMIC_FETCH_AND_ADD:
4856
4857                         QL_DPRINT12(ha,
4858                                 "ATOMIC operation = %s\n",
4859                                 ((wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) ?
4860                                         "IB_WR_ATOMIC_CMP_AND_SWP" : 
4861                                         "IB_WR_ATOMIC_FETCH_AND_ADD"));
4862
4863                         awqe1 = (struct rdma_sq_atomic_wqe *)wqe;
4864                         awqe1->prev_wqe_size = 4;
4865
4866                         awqe2 = (struct rdma_sq_atomic_wqe *)
4867                                         ecore_chain_produce(&qp->sq.pbl);
4868
4869                         TYPEPTR_ADDR_SET(awqe2, remote_va, \
4870                                 atomic_wr(wr)->remote_addr);
4871
4872                         awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
4873
4874                         awqe3 = (struct rdma_sq_atomic_wqe *)
4875                                         ecore_chain_produce(&qp->sq.pbl);
4876
4877                         if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
4878                                 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
4879                                 TYPEPTR_ADDR_SET(awqe3, swap_data,
4880                                                  atomic_wr(wr)->compare_add);
4881                         } else {
4882                                 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
4883                                 TYPEPTR_ADDR_SET(awqe3, swap_data,
4884                                                  atomic_wr(wr)->swap);
4885                                 TYPEPTR_ADDR_SET(awqe3, cmp_data,
4886                                                  atomic_wr(wr)->compare_add);
4887                         }
4888
4889                         qlnxr_prepare_sq_sges(dev, qp, NULL, wr);
4890
4891                         qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->prev_wqe_size;
4892                         qp->prev_wqe_size = awqe1->prev_wqe_size;
4893
4894                         break;
4895
4896                 case IB_WR_LOCAL_INV:
4897
4898                         QL_DPRINT12(ha,
4899                                 "INVALIDATE length (IB_WR_LOCAL_INV)\n");
4900
4901                         iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
4902                         iwqe->prev_wqe_size = 1;
4903
4904                         iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
4905                         iwqe->inv_l_key = wr->ex.invalidate_rkey;
4906                         qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->prev_wqe_size;
4907                         qp->prev_wqe_size = iwqe->prev_wqe_size;
4908
4909                         break;
4910
4911 #if __FreeBSD_version >= 1102000
4912
4913                 case IB_WR_REG_MR:
4914
4915                         QL_DPRINT12(ha, "IB_WR_REG_MR\n");
4916
4917                         wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
4918                         fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
4919                         fwqe1->wqe_size = 2;
4920
4921                         rc = qlnxr_prepare_reg(qp, fwqe1, reg_wr(wr));
4922                         if (rc) {
4923                                 QL_DPRINT11(ha, "IB_WR_REG_MR failed rc=%d\n", rc);
4924                                 *bad_wr = wr;
4925                                 break;
4926                         }
4927
4928                         qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
4929                         qp->prev_wqe_size = fwqe1->wqe_size;
4930
4931                         break;
4932 #else
4933                 case IB_WR_FAST_REG_MR:
4934
4935                         QL_DPRINT12(ha, "FAST_MR (IB_WR_FAST_REG_MR)\n");
4936
4937                         wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
4938                         fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
4939                         fwqe1->prev_wqe_size = 3;
4940
4941                         rc = qlnxr_prepare_fmr(qp, fwqe1, wr);
4942
4943                         if (rc) {
4944                                 QL_DPRINT12(ha,
4945                                         "FAST_MR (IB_WR_FAST_REG_MR) failed"
4946                                         " rc = %d\n", rc);
4947                                 *bad_wr = wr;
4948                                 break;
4949                         }
4950
4951                         qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->prev_wqe_size;
4952                         qp->prev_wqe_size = fwqe1->prev_wqe_size;
4953
4954                         break;
4955 #endif /* #if __FreeBSD_version >= 1102000 */
4956
4957                 default:
4958
4959                         QL_DPRINT12(ha, "Invalid Opcode 0x%x!\n", wr->opcode);
4960
4961                         rc = -EINVAL;
4962                         *bad_wr = wr;
4963                         break;
4964                 }
4965
4966                 if (*bad_wr) {
4967                         /*
4968                          * restore prod to its position before this WR was processed
4969                          */
4970                         ecore_chain_set_prod(&qp->sq.pbl,
4971                              le16_to_cpu(qp->sq.db_data.data.value),
4972                              wqe);
4973                         /* restore prev_wqe_size */
4974                         qp->prev_wqe_size = wqe->prev_wqe_size;
4975                         status = rc;
4976
4977                         QL_DPRINT12(ha, "failed *bad_wr = %p\n", *bad_wr);
4978                         break; /* out of the loop */
4979                 }
4980
4981                 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
4982
4983                 qlnxr_inc_sw_prod(&qp->sq);
4984
4985                 qp->sq.db_data.data.value++;
4986
4987                 wr = wr->next;
4988         }
4989
4990         /* Trigger doorbell
4991          * If there was a failure in the first WR then it will be triggered in
4992          * vain. However, this is not harmful (as long as the producer value is
4993          * unchanged). For performance reasons we avoid checking for this
4994          * redundant doorbell.
4995          */
4996         wmb();
4997         //writel(qp->sq.db_data.raw, qp->sq.db);
4998
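        /* Ring the SQ doorbell: compute the doorbell offset relative to the
         * doorbell BAR base and write the updated producer value.
         */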
4999         reg_addr = (uint32_t)((uint8_t *)qp->sq.db - (uint8_t *)ha->cdev.doorbells);
5000         bus_write_4(ha->pci_dbells, reg_addr, qp->sq.db_data.raw);
5001         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5002
5003         mmiowb();
5004
5005         spin_unlock_irqrestore(&qp->q_lock, flags);
5006
5007         QL_DPRINT12(ha, "exit[ibqp, wr, bad_wr] = [%p, %p, %p]\n",
5008                 ibqp, wr, bad_wr);
5009
5010         return status;
5011 }
5012
5013 static u32
5014 qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq)
5015 {
5016         u32 used;
5017
5018         /* Calculate the number of elements in use from the producer and
5019          * consumer counts, then subtract that from the maximum number of
5020          * supported work requests to get the number of elements left.
5021          */
5022         used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
5023
5024         return hw_srq->max_wr - used;
5025 }
5026
5027
5028 int
5029 qlnxr_post_recv(struct ib_qp *ibqp,
5030         struct ib_recv_wr *wr,
5031         struct ib_recv_wr **bad_wr)
5032 {
5033         struct qlnxr_qp         *qp = get_qlnxr_qp(ibqp);
5034         struct qlnxr_dev        *dev = qp->dev;
5035         unsigned long           flags;
5036         int                     status = 0;
5037         qlnx_host_t             *ha;
5038         uint32_t                reg_addr;
5039
5040         ha = dev->ha;
5041
5042         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5043                 return -EINVAL;
5044
5045         QL_DPRINT12(ha, "enter\n");
5046  
5047         if (qp->qp_type == IB_QPT_GSI) {
5048                 QL_DPRINT12(ha, "(qp->qp_type = IB_QPT_GSI)\n");
5049                 return qlnxr_gsi_post_recv(ibqp, wr, bad_wr);
5050         }
5051
5052         if (qp->srq) {
5053                 QL_DPRINT11(ha, "qp->srq [%p]"
5054                         " QP is associated with SRQ, cannot post RQ buffers\n",
5055                         qp->srq);
5056                 return -EINVAL;
5057         }
5058
5059         spin_lock_irqsave(&qp->q_lock, flags);
5060
5061         if (qp->state == ECORE_ROCE_QP_STATE_RESET) {
5062                 spin_unlock_irqrestore(&qp->q_lock, flags);
5063                 *bad_wr = wr;
5064
5065                 QL_DPRINT11(ha, "qp->qp_type = ECORE_ROCE_QP_STATE_RESET\n");
5066
5067                 return -EINVAL;
5068         }
5069
5070         while (wr) {
5071                 int i;
5072
5073                 if ((ecore_chain_get_elem_left_u32(&qp->rq.pbl) <
5074                         QLNXR_MAX_RQE_ELEMENTS_PER_RQE) ||
5075                         (wr->num_sge > qp->rq.max_sges)) {
5076                         status = -ENOMEM;
5077                         *bad_wr = wr;
5078                         break;
5079                 }
5080                 for (i = 0; i < wr->num_sge; i++) {
5081                         u32 flags = 0;
5082                         struct rdma_rq_sge *rqe = ecore_chain_produce(&qp->rq.pbl);
5083
5084                         /* first one must include the number of SGE in the list */
5085                         if (!i)
5086                                 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, wr->num_sge);
5087
5088                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, wr->sg_list[i].lkey);
5089
5090                         RQ_SGE_SET(rqe, wr->sg_list[i].addr, \
5091                                 wr->sg_list[i].length, flags);
5092                 }
5093                 /* Special case of no SGEs: the FW requires between 1 and 4 SGEs,
5094                  * so post a single SGE with length zero. This is needed because an
5095                  * RDMA write with immediate consumes an RQ entry. */
5096                 if (!wr->num_sge) {
5097                         u32 flags = 0;
5098                         struct rdma_rq_sge *rqe = ecore_chain_produce(&qp->rq.pbl);
5099
5100                         /* first one must include the number of SGE in the list */
5101                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
5102                         SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
5103
5104                         //RQ_SGE_SET(rqe, 0, 0, flags);
5105                         rqe->addr.hi = 0;
5106                         rqe->addr.lo = 0;
5107
5108                         rqe->length = 0;
5109                         rqe->flags = cpu_to_le32(flags);
5110
5111                         i = 1;
5112                 }
5113
5114                 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
5115                 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
5116
5117                 qlnxr_inc_sw_prod(&qp->rq);
5118
5119                 wmb();
5120
5121                 qp->rq.db_data.data.value++;
5122
5123         //      writel(qp->rq.db_data.raw, qp->rq.db);
5124                 mmiowb();
5125         //      if (QLNX_IS_IWARP(dev)) {
5126         //              writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
5127         //              mmiowb(); /* for second doorbell */
5128         //      }
5129
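                /* Ring the RQ doorbell (and the second iWARP doorbell when
                 * applicable) by writing at the offset of the doorbell address
                 * relative to the doorbell BAR base.
                 */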
5130                 reg_addr = (uint32_t)((uint8_t *)qp->rq.db -
5131                                 (uint8_t *)ha->cdev.doorbells);
5132
5133                 bus_write_4(ha->pci_dbells, reg_addr, qp->rq.db_data.raw);
5134                 bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5135
5136                 if (QLNX_IS_IWARP(dev)) {
5137                         reg_addr = (uint32_t)((uint8_t *)qp->rq.iwarp_db2 -
5138                                                 (uint8_t *)ha->cdev.doorbells);
5139                         bus_write_4(ha->pci_dbells, reg_addr, \
5140                                 qp->rq.iwarp_db2_data.raw);
5141                         bus_barrier(ha->pci_dbells,  0, 0, \
5142                                 BUS_SPACE_BARRIER_READ);
5143                 }
5144
5145                 wr = wr->next;
5146         }
5147
5148         spin_unlock_irqrestore(&qp->q_lock, flags);
5149
5150         QL_DPRINT12(ha, "exit status = 0x%x\n", status);
5151
5152         return status;
5153 }
5154
5155 /* For FMR we need to increment the FMR-completed counter used by the FMR
5156  * algorithm to decide whether a PBL can be freed.
5157  * This must be done whether or not the work request was signaled, so we call
5158  * this function from the condition that checks whether a WR should be
5159  * skipped, to make sure we don't miss it (this FMR operation may not have
5160  * been signaled).
5161  */
5162 static inline void
5163 qlnxr_chk_if_fmr(struct qlnxr_qp *qp)
5164 {
5165 #if __FreeBSD_version >= 1102000
5166
5167         if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
5168                 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
5169 #else
5170         if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_FAST_REG_MR)
5171                 qp->wqe_wr_id[qp->sq.cons].frmr->info.completed++;
5172
5173 #endif /* #if __FreeBSD_version >= 1102000 */
5174 }
5175
5176 static int
5177 process_req(struct qlnxr_dev *dev,
5178         struct qlnxr_qp *qp,
5179         struct qlnxr_cq *cq,
5180         int num_entries,
5181         struct ib_wc *wc,
5182         u16 hw_cons,
5183         enum ib_wc_status status,
5184         int force)
5185 {
5186         u16             cnt = 0;
5187         qlnx_host_t     *ha = dev->ha;
5188
5189         QL_DPRINT12(ha, "enter\n");
5190  
5191         while (num_entries && qp->sq.wqe_cons != hw_cons) {
5192                 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
5193                         qlnxr_chk_if_fmr(qp);
5194                         /* skip WC */
5195                         goto next_cqe;
5196                 }
5197
5198                 /* fill WC */
5199                 wc->status = status;
5200                 wc->vendor_err = 0;
5201                 wc->wc_flags = 0;
5202                 wc->src_qp = qp->id;
5203                 wc->qp = &qp->ibqp;
5204
5205                 // common section
5206                 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
5207                 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
5208
5209                 switch (wc->opcode) {
5210
5211                 case IB_WC_RDMA_WRITE:
5212
5213                         wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
5214
5215                         QL_DPRINT12(ha,
5216                                 "opcode = IB_WC_RDMA_WRITE bytes = %d\n",
5217                                 qp->wqe_wr_id[qp->sq.cons].bytes_len);
5218                         break;
5219
5220                 case IB_WC_COMP_SWAP:
5221                 case IB_WC_FETCH_ADD:
5222                         wc->byte_len = 8;
5223                         break;
5224
5225 #if __FreeBSD_version >= 1102000
5226                 case IB_WC_REG_MR:
5227                         qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
5228                         break;
5229 #else
5230                 case IB_WC_FAST_REG_MR:
5231                         qp->wqe_wr_id[qp->sq.cons].frmr->info.completed++;
5232                         break;
5233 #endif /* #if __FreeBSD_version >= 1102000 */
5234
5235                 case IB_WC_RDMA_READ:
5236                 case IB_WC_SEND:
5237
5238                         QL_DPRINT12(ha, "opcode = 0x%x \n", wc->opcode);
5239                         break;
5240                 default:
5241                         ;//DP_ERR("TBD ERROR");
5242                 }
5243
5244                 num_entries--;
5245                 wc++;
5246                 cnt++;
5247 next_cqe:
5248                 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
5249                         ecore_chain_consume(&qp->sq.pbl);
5250                 qlnxr_inc_sw_cons(&qp->sq);
5251         }
5252
5253         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5254         return cnt;
5255 }
5256
5257 static int
5258 qlnxr_poll_cq_req(struct qlnxr_dev *dev,
5259         struct qlnxr_qp *qp,
5260         struct qlnxr_cq *cq,
5261         int num_entries,
5262         struct ib_wc *wc,
5263         struct rdma_cqe_requester *req)
5264 {
5265         int             cnt = 0;
5266         qlnx_host_t     *ha = dev->ha;
5267
5268         QL_DPRINT12(ha, "enter req->status = 0x%x\n", req->status);
5269  
5270         switch (req->status) {
5271
5272         case RDMA_CQE_REQ_STS_OK:
5273
5274                 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
5275                         IB_WC_SUCCESS, 0);
5276                 break;
5277
5278         case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
5279
5280                 if (qp->state != ECORE_ROCE_QP_STATE_ERR)
5281                         cnt = process_req(dev, qp, cq, num_entries, wc,
5282                                           req->sq_cons, IB_WC_WR_FLUSH_ERR, 1);
5283                 break;
5284
5285         default: /* other errors case */
5286
5287                 /* process all WQEs before the consumer */
5288                 qp->state = ECORE_ROCE_QP_STATE_ERR;
5289                 cnt = process_req(dev, qp, cq, num_entries, wc,
5290                                 req->sq_cons - 1, IB_WC_SUCCESS, 0);
5291                 wc += cnt;
5292                 /* if we have extra WC fill it with actual error info */
5293
5294                 if (cnt < num_entries) {
5295                         enum ib_wc_status wc_status;
5296
5297                         switch (req->status) {
5298                         case    RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
5299                                 wc_status = IB_WC_BAD_RESP_ERR;
5300                                 break;
5301                         case    RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
5302                                 wc_status = IB_WC_LOC_LEN_ERR;
5303                                 break;
5304                         case    RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
5305                                 wc_status = IB_WC_LOC_QP_OP_ERR;
5306                                 break;
5307                         case    RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
5308                                 wc_status = IB_WC_LOC_PROT_ERR;
5309                                 break;
5310                         case    RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
5311                                 wc_status = IB_WC_MW_BIND_ERR;
5312                                 break;
5313                         case    RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
5314                                 wc_status = IB_WC_REM_INV_REQ_ERR;
5315                                 break;
5316                         case    RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
5317                                 wc_status = IB_WC_REM_ACCESS_ERR;
5318                                 break;
5319                         case    RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
5320                                 wc_status = IB_WC_REM_OP_ERR;
5321                                 break;
5322                         case    RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
5323                                 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
5324                                 break;
5325                         case    RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
5326                                 wc_status = IB_WC_RETRY_EXC_ERR;
5327                                 break;
5328                         default:
5329                                 wc_status = IB_WC_GENERAL_ERR;
5330                         }
5331
5332                         cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
5333                                         wc_status, 1 /* force use of WC */);
5334                 }
5335         }
5336
5337         QL_DPRINT12(ha, "exit cnt = %d\n", cnt);
5338         return cnt;
5339 }
5340
5341 static void
5342 __process_resp_one(struct qlnxr_dev *dev,
5343         struct qlnxr_qp *qp,
5344         struct qlnxr_cq *cq,
5345         struct ib_wc *wc,
5346         struct rdma_cqe_responder *resp,
5347         u64 wr_id)
5348 {
5349         enum ib_wc_status       wc_status = IB_WC_SUCCESS;
5350 #if __FreeBSD_version < 1102000
5351         u8                      flags;
5352 #endif
5353         qlnx_host_t             *ha = dev->ha;
5354
5355         QL_DPRINT12(ha, "enter qp = %p resp->status = 0x%x\n",
5356                 qp, resp->status);
5357  
5358         wc->opcode = IB_WC_RECV;
5359         wc->wc_flags = 0;
5360
5361         switch (resp->status) {
5362
5363         case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
5364                 wc_status = IB_WC_LOC_ACCESS_ERR;
5365                 break;
5366
5367         case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
5368                 wc_status = IB_WC_LOC_LEN_ERR;
5369                 break;
5370
5371         case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
5372                 wc_status = IB_WC_LOC_QP_OP_ERR;
5373                 break;
5374
5375         case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
5376                 wc_status = IB_WC_LOC_PROT_ERR;
5377                 break;
5378
5379         case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
5380                 wc_status = IB_WC_MW_BIND_ERR;
5381                 break;
5382
5383         case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
5384                 wc_status = IB_WC_REM_INV_RD_REQ_ERR;
5385                 break;
5386
5387         case RDMA_CQE_RESP_STS_OK:
5388
5389 #if __FreeBSD_version >= 1102000
5390                 if (resp->flags & QLNXR_RESP_IMM) {
5391                         wc->ex.imm_data =
5392                                 le32_to_cpu(resp->imm_data_or_inv_r_Key);
5393                         wc->wc_flags |= IB_WC_WITH_IMM;
5394
5395                         if (resp->flags & QLNXR_RESP_RDMA)
5396                                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
5397
5398                         if (resp->flags & QLNXR_RESP_INV) {
5399                                 QL_DPRINT11(ha,
5400                                         "Invalid flags QLNXR_RESP_INV [0x%x]"
5401                                         "qp = %p qp->id = 0x%x cq = %p"
5402                                         " cq->icid = 0x%x\n",
5403                                         resp->flags, qp, qp->id, cq, cq->icid );
5404                         }
5405                 } else if (resp->flags & QLNXR_RESP_INV) {
5406                         wc->ex.imm_data =
5407                                 le32_to_cpu(resp->imm_data_or_inv_r_Key);
5408                         wc->wc_flags |= IB_WC_WITH_INVALIDATE;
5409
5410                         if (resp->flags & QLNXR_RESP_RDMA) {
5411                                 QL_DPRINT11(ha,
5412                                         "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5413                                         "qp = %p qp->id = 0x%x cq = %p"
5414                                         " cq->icid = 0x%x\n",
5415                                         resp->flags, qp, qp->id, cq, cq->icid );
5416                         }
5417                 } else if (resp->flags & QLNXR_RESP_RDMA) {
5418                         QL_DPRINT11(ha, "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5419                                 "qp = %p qp->id = 0x%x cq = %p cq->icid = 0x%x\n",
5420                                 resp->flags, qp, qp->id, cq, cq->icid );
5421                 }
5422 #else
5423                 wc_status = IB_WC_SUCCESS;
5424                 wc->byte_len = le32_to_cpu(resp->length);
5425
5426                 flags = resp->flags & QLNXR_RESP_RDMA_IMM;
5427
5428                 switch (flags) {
5429
5430                 case QLNXR_RESP_RDMA_IMM:
5431                         /* update opcode */
5432                         wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
5433                         /* fall through to set imm data */
5434                 case QLNXR_RESP_IMM:
5435                         wc->ex.imm_data =
5436                                 le32_to_cpu(resp->imm_data_or_inv_r_Key);
5437                         wc->wc_flags |= IB_WC_WITH_IMM;
5438                         break;
5439                 case QLNXR_RESP_RDMA:
5440                         QL_DPRINT11(ha, "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5441                                 "qp = %p qp->id = 0x%x cq = %p cq->icid = 0x%x\n",
5442                                 resp->flags, qp, qp->id, cq, cq->icid );
5443                         break;
5444                 default:
5445                         /* valid configuration, but nothing to do here */
5446                         ;
5447                 }
5448 #endif /* #if __FreeBSD_version >= 1102000 */
5449
5450                 break;
5451         default:
5452                 wc_status = IB_WC_GENERAL_ERR;
5453         }
5454
5455         /* fill WC */
5456         wc->status = wc_status;
5457         wc->vendor_err = 0;
5458         wc->src_qp = qp->id;
5459         wc->qp = &qp->ibqp;
5460         wc->wr_id = wr_id;
5461
5462         QL_DPRINT12(ha, "exit status = 0x%x\n", wc_status);
5463
5464         return;
5465 }
5466
5467 static int
5468 process_resp_one_srq(struct qlnxr_dev *dev,
5469         struct qlnxr_qp *qp,
5470         struct qlnxr_cq *cq,
5471         struct ib_wc *wc,
5472         struct rdma_cqe_responder *resp)
5473 {
5474         struct qlnxr_srq        *srq = qp->srq;
5475         u64                     wr_id;
5476         qlnx_host_t             *ha = dev->ha;
5477
5478         QL_DPRINT12(ha, "enter\n");
5479  
5480         wr_id = HILO_U64(resp->srq_wr_id.hi, resp->srq_wr_id.lo);
5481
5482         if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
5483                 wc->status = IB_WC_WR_FLUSH_ERR;
5484                 wc->vendor_err = 0;
5485                 wc->wr_id = wr_id;
5486                 wc->byte_len = 0;
5487                 wc->src_qp = qp->id;
5488                 wc->qp = &qp->ibqp;
5490         } else {
5491                 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
5492         }
5493
5494         /* The PBL is maintained at WR granularity, so increment the WR
5495          * consumer count after the WR has been consumed.
5496          */
5497         srq->hw_srq.wr_cons_cnt++;
5498
5499         QL_DPRINT12(ha, "exit\n");
5500         return 1;
5501 }
5502
5503 static int
5504 process_resp_one(struct qlnxr_dev *dev,
5505         struct qlnxr_qp *qp,
5506         struct qlnxr_cq *cq,
5507         struct ib_wc *wc,
5508         struct rdma_cqe_responder *resp)
5509 {
5510         qlnx_host_t     *ha = dev->ha;
5511         u64             wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
5512
5513         QL_DPRINT12(ha, "enter\n");
5514  
5515         __process_resp_one(dev, qp, cq, wc, resp, wr_id);
5516
5517         while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
5518                 ecore_chain_consume(&qp->rq.pbl);
5519         qlnxr_inc_sw_cons(&qp->rq);
5520
5521         QL_DPRINT12(ha, "exit\n");
5522         return 1;
5523 }
5524
5525 static int
5526 process_resp_flush(struct qlnxr_qp *qp,
5527         int num_entries,
5528         struct ib_wc *wc,
5529         u16 hw_cons)
5530 {
5531         u16             cnt = 0;
5532         qlnx_host_t     *ha = qp->dev->ha;
5533
5534         QL_DPRINT12(ha, "enter\n");
5535  
5536         while (num_entries && qp->rq.wqe_cons != hw_cons) {
5537                 /* fill WC */
5538                 wc->status = IB_WC_WR_FLUSH_ERR;
5539                 wc->vendor_err = 0;
5540                 wc->wc_flags = 0;
5541                 wc->src_qp = qp->id;
5542                 wc->byte_len = 0;
5543                 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
5544                 wc->qp = &qp->ibqp;
5545                 num_entries--;
5546                 wc++;
5547                 cnt++;
5548                 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
5549                         ecore_chain_consume(&qp->rq.pbl);
5550                 qlnxr_inc_sw_cons(&qp->rq);
5551         }
5552
5553         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5554         return cnt;
5555 }
5556
5557 static void
5558 try_consume_resp_cqe(struct qlnxr_cq *cq,
5559         struct qlnxr_qp *qp,
5560         struct rdma_cqe_responder *resp,
5561         int *update)
5562 {
5563         if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
5564                 consume_cqe(cq);
5565                 *update |= 1;
5566         }
5567 }
5568
5569 static int
5570 qlnxr_poll_cq_resp_srq(struct qlnxr_dev *dev,
5571         struct qlnxr_qp *qp,
5572         struct qlnxr_cq *cq,
5573         int num_entries,
5574         struct ib_wc *wc,
5575         struct rdma_cqe_responder *resp,
5576         int *update)
5577 {
5578         int             cnt;
5579         qlnx_host_t     *ha = dev->ha;
5580
5581         QL_DPRINT12(ha, "enter\n");
5582  
5583         cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
5584         consume_cqe(cq);
5585         *update |= 1;
5586
5587         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5588         return cnt;
5589 }
5590
5591 static int
5592 qlnxr_poll_cq_resp(struct qlnxr_dev *dev,
5593         struct qlnxr_qp *qp,
5594         struct qlnxr_cq *cq,
5595         int num_entries,
5596         struct ib_wc *wc,
5597         struct rdma_cqe_responder *resp,
5598         int *update)
5599 {
5600         int             cnt;
5601         qlnx_host_t     *ha = dev->ha;
5602
5603         QL_DPRINT12(ha, "enter\n");
5604  
5605         if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
5606                 cnt = process_resp_flush(qp, num_entries, wc,
5607                                 resp->rq_cons);
5608                 try_consume_resp_cqe(cq, qp, resp, update);
5609         } else {
5610                 cnt = process_resp_one(dev, qp, cq, wc, resp);
5611                 consume_cqe(cq);
5612                 *update |= 1;
5613         }
5614
5615         QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5616         return cnt;
5617 }
5618
5619 static void
5620 try_consume_req_cqe(struct qlnxr_cq *cq, struct qlnxr_qp *qp,
5621         struct rdma_cqe_requester *req, int *update)
5622 {
5623         if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
5624                 consume_cqe(cq);
5625                 *update |= 1;
5626         }
5627 }
5628
5629 static void
5630 doorbell_cq(struct qlnxr_dev *dev, struct qlnxr_cq *cq, u32 cons, u8 flags)
5631 {
5632         uint64_t        reg_addr;
5633         qlnx_host_t     *ha = dev->ha;
5634
5635         QL_DPRINT12(ha, "enter\n");
5636  
5637         wmb();
5638         cq->db.data.agg_flags = flags;
5639         cq->db.data.value = cpu_to_le32(cons);
5640
5641         reg_addr = (uint64_t)((uint8_t *)cq->db_addr -
5642                                 (uint8_t *)(ha->cdev.doorbells));
5643
5644         bus_write_8(ha->pci_dbells, reg_addr, cq->db.raw);
5645         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5646
5647         QL_DPRINT12(ha, "exit\n");
5648         return;
5649
5650 //#ifdef __LP64__
5651 //      writeq(cq->db.raw, cq->db_addr);
5652 //#else
5653         /* Note that since the FW allows 64 bit write only, in 32bit systems
5654          * the value of db_addr must be low enough. This is currently not
5655          * enforced.
5656          */
5657 //      writel(cq->db.raw & 0xffffffff, cq->db_addr);
5658 //      mmiowb();
5659 //#endif
5660 }
5661
5662
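/*
 * A CQE is considered new/valid when its toggle bit matches the CQ's current
 * pbl_toggle; the toggle flips each time the CQE chain wraps, so entries left
 * over from the previous pass are ignored.
 */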
5663 static int
5664 is_valid_cqe(struct qlnxr_cq *cq, union rdma_cqe *cqe)
5665 {
5666         struct rdma_cqe_requester *resp_cqe = &cqe->req;
5667         return (resp_cqe->flags & RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK) ==
5668                         cq->pbl_toggle;
5669 }
5670
5671 int
5672 qlnxr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
5673 {
5674         struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
5675         struct qlnxr_dev *dev = get_qlnxr_dev((ibcq->device));
5676         int             done = 0;
5677         union rdma_cqe  *cqe = cq->latest_cqe;
5678         int             update = 0;
5679         u32             old_cons, new_cons;
5680         unsigned long   flags;
5681         qlnx_host_t     *ha = dev->ha;
5682
5683         QL_DPRINT12(ha, "enter\n");
5684
5685         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5686                 return -EINVAL;
5687  
5688         if (cq->destroyed) {
5689                 QL_DPRINT11(ha, "called after destroy for cq %p (icid=%d)\n",
5690                         cq, cq->icid);
5691                 return 0;
5692         }
5693
5694         if (cq->cq_type == QLNXR_CQ_TYPE_GSI)
5695                 return qlnxr_gsi_poll_cq(ibcq, num_entries, wc);
5696
5697         spin_lock_irqsave(&cq->cq_lock, flags);
5698
5699         old_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
5700
5701         while (num_entries && is_valid_cqe(cq, cqe)) {
5702                 int cnt = 0;
5703                 struct qlnxr_qp *qp;
5704                 struct rdma_cqe_requester *resp_cqe;
5705                 enum rdma_cqe_type cqe_type;
5706
5707                 /* prevent speculative reads of any field of CQE */
5708                 rmb();
5709
5710                 resp_cqe = &cqe->req;
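                /* The CQE carries the QP handle split into 32-bit hi/lo halves;
                 * reassemble the pointer to find the owning QP.
                 */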
5711                 qp = (struct qlnxr_qp *)(uintptr_t)HILO_U64(resp_cqe->qp_handle.hi,
5712                                                 resp_cqe->qp_handle.lo);
5713
5714                 if (!qp) {
5715                         QL_DPRINT11(ha, "qp = NULL\n");
5716                         break;
5717                 }
5718
5719                 wc->qp = &qp->ibqp;
5720
5721                 cqe_type = GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
5722
5723                 switch (cqe_type) {
5724                 case RDMA_CQE_TYPE_REQUESTER:
5725                         cnt = qlnxr_poll_cq_req(dev, qp, cq, num_entries,
5726                                         wc, &cqe->req);
5727                         try_consume_req_cqe(cq, qp, &cqe->req, &update);
5728                         break;
5729                 case RDMA_CQE_TYPE_RESPONDER_RQ:
5730                         cnt = qlnxr_poll_cq_resp(dev, qp, cq, num_entries,
5731                                         wc, &cqe->resp, &update);
5732                         break;
5733                 case RDMA_CQE_TYPE_RESPONDER_SRQ:
5734                         cnt = qlnxr_poll_cq_resp_srq(dev, qp, cq, num_entries,
5735                                         wc, &cqe->resp, &update);
5736                         break;
5737                 case RDMA_CQE_TYPE_INVALID:
5738                 default:
5739                         QL_DPRINT11(ha, "cqe type [0x%x] invalid\n", cqe_type);
5740                         break;
5741                 }
5742                 num_entries -= cnt;
5743                 wc += cnt;
5744                 done += cnt;
5745
5746                 cqe = cq->latest_cqe;
5747         }
5748         new_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
5749
5750         cq->cq_cons += new_cons - old_cons;
5751
5752         if (update) {
5753                 /* the doorbell notifies about the latest VALID entry,
5754                  * but the chain already points to the next INVALID one
5755                  */
5756                 doorbell_cq(dev, cq, cq->cq_cons - 1, cq->arm_flags);
5757                 QL_DPRINT12(ha, "cq = %p cons = 0x%x "
5758                         "arm_flags = 0x%x db.icid = 0x%x\n", cq,
5759                         (cq->cq_cons - 1), cq->arm_flags, cq->db.data.icid);
5760         }
5761
5762         spin_unlock_irqrestore(&cq->cq_lock, flags);
5763
5764         QL_DPRINT12(ha, "exit\n");
5765  
5766         return done;
5767 }
5768
5769
5770 int
5771 qlnxr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
5772 {
5773         struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
5774         unsigned long sflags;
5775         struct qlnxr_dev *dev;
5776         qlnx_host_t     *ha;
5777
5778         dev = get_qlnxr_dev((ibcq->device));
5779         ha = dev->ha;
5780
5781         QL_DPRINT12(ha, "enter ibcq = %p flags = 0x%x "
5782                 "cq = %p cons = 0x%x cq_type = 0x%x\n", ibcq,
5783                 flags, cq, cq->cq_cons, cq->cq_type);
5784
5785         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5786                 return -EINVAL;
5787
5788         if (cq->destroyed) {
5789                 QL_DPRINT11(ha, "cq was already destroyed cq = %p icid=%d\n",
5790                         cq, cq->icid);
5791                 return -EINVAL;
5792         }
5793
5794         if (cq->cq_type == QLNXR_CQ_TYPE_GSI) {
5795                 return 0;
5796         }
5797
5798         spin_lock_irqsave(&cq->cq_lock, sflags);
5799
5800         cq->arm_flags = 0;
5801
5802         if (flags & IB_CQ_SOLICITED) {
5803                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
5804         }
5805         if (flags & IB_CQ_NEXT_COMP) {
5806                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
5807         }
5808
5809         doorbell_cq(dev, cq, (cq->cq_cons - 1), cq->arm_flags);
5810
5811         spin_unlock_irqrestore(&cq->cq_lock, sflags);
5812
5813         QL_DPRINT12(ha, "exit ibcq = %p flags = 0x%x\n", ibcq, flags);
5814         return 0;
5815 }
5816
5817
5818 static struct qlnxr_mr *
5819 __qlnxr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
5820 {
5821         struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
5822         struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
5823         struct qlnxr_mr *mr;
5824         int             rc = -ENOMEM;
5825         qlnx_host_t     *ha;
5826
5827         ha = dev->ha;
5828  
5829         QL_DPRINT12(ha, "enter ibpd = %p pd = %p "
5830                 " pd_id = %d max_page_list_len = %d\n",
5831                 ibpd, pd, pd->pd_id, max_page_list_len);
5832
5833         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
5834         if (!mr) {
5835                 QL_DPRINT11(ha, "kzalloc(mr) failed\n");
5836                 return ERR_PTR(rc);
5837         }
5838
5839         mr->dev = dev;
5840         mr->type = QLNXR_MR_FRMR;
5841
5842         rc = qlnxr_init_mr_info(dev, &mr->info, max_page_list_len,
5843                                   1 /* allow dual layer pbl */);
5844         if (rc) {
5845                 QL_DPRINT11(ha, "qlnxr_init_mr_info failed\n");
5846                 goto err0;
5847         }
5848
5849         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
5850         if (rc) {
5851                 QL_DPRINT11(ha, "ecore_rdma_alloc_tid failed\n");
5852                 goto err0;
5853         }
5854
5855         /* index only, 18 bit long, lkey = itid << 8 | key */
5856         mr->hw_mr.tid_type = ECORE_RDMA_TID_FMR;
5857         mr->hw_mr.key = 0;
5858         mr->hw_mr.pd = pd->pd_id;
5859         mr->hw_mr.local_read = 1;
5860         mr->hw_mr.local_write = 0;
5861         mr->hw_mr.remote_read = 0;
5862         mr->hw_mr.remote_write = 0;
5863         mr->hw_mr.remote_atomic = 0;
5864         mr->hw_mr.mw_bind = false; /* TBD MW BIND */
5865         mr->hw_mr.pbl_ptr = 0; /* Will be supplied during post */
5866         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
5867         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
5868         mr->hw_mr.fbo = 0;
5869         mr->hw_mr.length = 0;
5870         mr->hw_mr.vaddr = 0;
5871         mr->hw_mr.zbva = false; /* TBD figure when this should be true */
5872         mr->hw_mr.phy_mr = true; /* Fast MR - True, Regular Register False */
5873         mr->hw_mr.dma_mr = false;
5874
5875         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
5876         if (rc) {
5877                 QL_DPRINT11(ha, "ecore_rdma_register_tid failed\n");
5878                 goto err1;
5879         }
5880
5881         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
5882         mr->ibmr.rkey = mr->ibmr.lkey;
5883
5884         QL_DPRINT12(ha, "exit mr = %p mr->ibmr.lkey = 0x%x\n",
5885                 mr, mr->ibmr.lkey);
5886
5887         return mr;
5888
5889 err1:
5890         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
5891 err0:
5892         kfree(mr);
5893
5894         QL_DPRINT12(ha, "exit\n");
5895
5896         return ERR_PTR(rc);
5897 }
5898
5899 #if __FreeBSD_version >= 1102000
5900
5901 struct ib_mr *
5902 qlnxr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, u32 max_num_sg)
5903 {
5904         struct qlnxr_dev *dev;
5905         struct qlnxr_mr *mr;
5906         qlnx_host_t     *ha;
5907
5908         dev = get_qlnxr_dev(ibpd->device);
5909         ha = dev->ha;
5910
5911         QL_DPRINT12(ha, "enter\n");
5912
5913         if (mr_type != IB_MR_TYPE_MEM_REG)
5914                 return ERR_PTR(-EINVAL);
5915
5916         mr = __qlnxr_alloc_mr(ibpd, max_num_sg);
5917
5918         if (IS_ERR(mr))
5919                 return ERR_PTR(-EINVAL);
5920
5921         QL_DPRINT12(ha, "exit mr = %p &mr->ibmr = %p\n", mr, &mr->ibmr);
5922
5923         return &mr->ibmr;
5924 }
5925
5926 static int
5927 qlnxr_set_page(struct ib_mr *ibmr, u64 addr)
5928 {
5929         struct qlnxr_mr *mr = get_qlnxr_mr(ibmr);
5930         struct qlnxr_pbl *pbl_table;
5931         struct regpair *pbe;
5932         struct qlnxr_dev *dev;
5933         qlnx_host_t     *ha;
5934         u32 pbes_in_page;
5935
5936         dev = mr->dev;
5937         ha = dev->ha;
5938
5939         if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
5940                 QL_DPRINT12(ha, "fails mr->npages %d\n", mr->npages);
5941                 return -ENOMEM;
5942         }
5943
5944         QL_DPRINT12(ha, "mr->npages %d addr = %p enter\n", mr->npages,
5945                 ((void *)addr));
5946
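        /* Locate the PBL table page holding the PBE for this page index and the
         * entry within that page, then store the address as little-endian
         * lo/hi 32-bit words.
         */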
5947         pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
5948         pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
5949         pbe = (struct regpair *)pbl_table->va;
5950         pbe +=  mr->npages % pbes_in_page;
5951         pbe->lo = cpu_to_le32((u32)addr);
5952         pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
5953
5954         mr->npages++;
5955
5956         QL_DPRINT12(ha, "mr->npages %d addr = %p exit \n", mr->npages,
5957                 ((void *)addr));
5958         return 0;
5959 }
5960
5961 int
5962 qlnxr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
5963         int sg_nents, unsigned int *sg_offset)
5964 {
5965         int             ret;
5966         struct qlnxr_mr *mr = get_qlnxr_mr(ibmr);
5967         qlnx_host_t     *ha;
5968
5969         if (mr == NULL)
5970                 return (-1);
5971
5972         if (mr->dev == NULL)
5973                 return (-1);
5974
5975         ha = mr->dev->ha;
5976
5977         QL_DPRINT12(ha, "enter\n");
5978
5979         mr->npages = 0;
5980         qlnx_handle_completed_mrs(mr->dev, &mr->info);
5981
5982         ret = ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qlnxr_set_page);
5983
5984         QL_DPRINT12(ha, "exit ret = %d\n", ret);
5985
5986         return (ret);
5987 }
5988
5989 #else
5990
5991 struct ib_mr *
5992 qlnxr_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
5993 {
5994         struct qlnxr_dev *dev;
5995         struct qlnxr_mr *mr;
5996         qlnx_host_t     *ha;
5997         struct ib_mr *ibmr = NULL;
5998
5999         dev = get_qlnxr_dev((ibpd->device));
6000         ha = dev->ha;
6001
6002         QL_DPRINT12(ha, "enter\n");
6003
6004         mr = __qlnxr_alloc_mr(ibpd, max_page_list_len);
6005
6006         if (IS_ERR(mr)) {
6007                 ibmr = ERR_PTR(-EINVAL);
6008         } else {
6009                 ibmr = &mr->ibmr;
6010         }
6011
6012         QL_DPRINT12(ha, "exit %p\n", ibmr);
6013         return (ibmr);
6014 }
6015
6016 void
6017 qlnxr_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
6018 {
6019         struct qlnxr_fast_reg_page_list *frmr_list;
6020
6021         frmr_list = get_qlnxr_frmr_list(page_list);
6022  
6023         free_mr_info(frmr_list->dev, &frmr_list->info);
6024
6025         kfree(frmr_list->ibfrpl.page_list);
6026         kfree(frmr_list);
6027
6028         return;
6029 }
6030
6031 struct ib_fast_reg_page_list *
6032 qlnxr_alloc_frmr_page_list(struct ib_device *ibdev, int page_list_len)
6033 {
6034         struct qlnxr_fast_reg_page_list *frmr_list = NULL;
6035         struct qlnxr_dev                *dev;
6036         int                             size = page_list_len * sizeof(u64);
6037         int                             rc = -ENOMEM;
6038         qlnx_host_t                     *ha;
6039
6040         dev = get_qlnxr_dev(ibdev);
6041         ha = dev->ha;
6042
6043         QL_DPRINT12(ha, "enter\n");
6044
6045         frmr_list = kzalloc(sizeof(*frmr_list), GFP_KERNEL);
6046         if (!frmr_list) {
6047                 QL_DPRINT11(ha, "kzalloc(frmr_list) failed\n");
6048                 goto err;
6049         }
6050
6051         frmr_list->dev = dev;
6052         frmr_list->ibfrpl.page_list = kzalloc(size, GFP_KERNEL);
6053         if (!frmr_list->ibfrpl.page_list) {
6054                 QL_DPRINT11(ha, "kzalloc(frmr_list->ibfrpl.page_list) failed\n");
6055                 goto err0;
6056         }
6057
6058         rc = qlnxr_init_mr_info(dev, &frmr_list->info, page_list_len,
6059                           1 /* allow dual layer pbl */);
6060         if (rc)
6061                 goto err1;
6062
6063         QL_DPRINT12(ha, "exit %p\n", &frmr_list->ibfrpl);
6064
6065         return &frmr_list->ibfrpl;
6066
6067 err1:
6068         kfree(frmr_list->ibfrpl.page_list);
6069 err0:
6070         kfree(frmr_list);
6071 err:
6072         QL_DPRINT12(ha, "exit with error\n");
6073
6074         return ERR_PTR(rc);
6075 }
6076
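/*
 * Check that the physical buffer list is non-empty and that every buffer has
 * the same non-zero size; accumulate the overall length in *total_size and
 * return -1 if the list fails validation.
 */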
6077 static int
6078 qlnxr_validate_phys_buf_list(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
6079         int buf_cnt, uint64_t *total_size)
6080 {
6081         u64 size = 0;
6082
6083         *total_size = 0;
6084
6085         if (!buf_cnt || buf_list == NULL) {
6086                 QL_DPRINT11(ha,
6087                         "failed buf_list = %p buf_cnt = %d\n", buf_list, buf_cnt);
6088                 return (-1);
6089         }
6090
6091         size = buf_list->size;
6092
6093         if (!size) {
6094                 QL_DPRINT11(ha,
6095                         "failed buf_list = %p buf_cnt = %d"
6096                         " buf_list->size = 0\n", buf_list, buf_cnt);
6097                 return (-1);
6098         }
6099
6100         while (buf_cnt) {
6101
6102                 *total_size += buf_list->size;
6103
6104                 if (buf_list->size != size) {
6105                         QL_DPRINT11(ha,
6106                                 "failed buf_list = %p buf_cnt = %d"
6107                                 " all buffers should have same size\n",
6108                                 buf_list, buf_cnt);
6109                         return (-1);
6110                 }
6111
6112                 buf_list++;
6113                 buf_cnt--;
6114         }
6115         return (0);
6116 }
6117
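/*
 * Return the number of PAGE_SIZE pages needed to cover the buffer list,
 * rounding each buffer's size up to a whole page.
 */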
6118 static size_t
6119 qlnxr_get_num_pages(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
6120         int buf_cnt)
6121 {
6122         int     i;
6123         size_t  num_pages = 0;
6124         u64     size;
6125
6126         for (i = 0; i < buf_cnt; i++) {
6127
6128                 size = 0;
6129                 while (size < buf_list->size) {
6130                         size += PAGE_SIZE;
6131                         num_pages++;
6132                 }
6133                 buf_list++;
6134         }
6135         return (num_pages);
6136 }
6137
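/*
 * Write the physical address of every page in the buffer list into the PBEs
 * of the (optionally two-layered) PBL table, advancing to the next PBL page
 * whenever the current one has been filled.
 */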
6138 static void
6139 qlnxr_populate_phys_mem_pbls(struct qlnxr_dev *dev,
6140         struct ib_phys_buf *buf_list, int buf_cnt,
6141         struct qlnxr_pbl *pbl, struct qlnxr_pbl_info *pbl_info)
6142 {
6143         struct regpair          *pbe;
6144         struct qlnxr_pbl        *pbl_tbl;
6145         int                     pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
6146         qlnx_host_t             *ha;
6147         int                     i;
6148         u64                     pbe_addr;
6149
6150         ha = dev->ha;
6151
6152         QL_DPRINT12(ha, "enter\n");
6153
6154         if (!pbl_info) {
6155                 QL_DPRINT11(ha, "PBL_INFO not initialized\n");
6156                 return;
6157         }
6158
6159         if (!pbl_info->num_pbes) {
6160                 QL_DPRINT11(ha, "pbl_info->num_pbes == 0\n");
6161                 return;
6162         }
6163
6164         /* If we have a two-layered pbl, the first pbl points to the rest
6165          * of the pbls and the first data entry lies in the second pbl of
6166          * the table. */
6167         if (pbl_info->two_layered)
6168                 pbl_tbl = &pbl[1];
6169         else
6170                 pbl_tbl = pbl;
6171
6172         pbe = (struct regpair *)pbl_tbl->va;
6173         if (!pbe) {
6174                 QL_DPRINT12(ha, "pbe is NULL\n");
6175                 return;
6176         }
6177
6178         pbe_cnt = 0;
6179
6180         for (i = 0; i < buf_cnt; i++) {
6181
6182                 pages = buf_list->size >> PAGE_SHIFT;
6183
6184                 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
6185                         /* store the page address in pbe */
6186
6187                         pbe_addr = buf_list->addr + (PAGE_SIZE * pg_cnt);
6188
6189                         pbe->lo = cpu_to_le32((u32)pbe_addr);
6190                         pbe->hi = cpu_to_le32(((u32)(pbe_addr >> 32)));
6191
6192                         QL_DPRINT12(ha, "Populate pbl table:"
6193                                 " pbe->addr=0x%x:0x%x "
6194                                 " pbe_cnt = %d total_num_pbes=%d"
6195                                 " pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
6196                                 total_num_pbes, pbe);
6197
6198                         pbe_cnt ++;
6199                         total_num_pbes ++;
6200                         pbe++;
6201
6202                         if (total_num_pbes == pbl_info->num_pbes)
6203                                 return;
6204
6205                         /* If the given pbl is full of pbes,
6206                          * move to the next pbl. */
6207
6208                         if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
6209                                 pbl_tbl++;
6210                                 pbe = (struct regpair *)pbl_tbl->va;
6211                                 pbe_cnt = 0;
6212                         }
6213                 }
6214                 buf_list++;
6215         }
6216         QL_DPRINT12(ha, "exit\n");
6217         return;
6218 }
6219
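/*
 * Register a kernel MR over a caller-supplied physical buffer list: validate
 * the buffers, build the PBL table, allocate a TID and register it with the
 * firmware, then derive the lkey (and the rkey, if remote access was
 * requested) from the itid and key.
 */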
6220 struct ib_mr *
6221 qlnxr_reg_kernel_mr(struct ib_pd *ibpd,
6222         struct ib_phys_buf *buf_list,
6223         int buf_cnt, int acc, u64 *iova_start)
6224 {
6225         int             rc = -ENOMEM;
6226         struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
6227         struct qlnxr_mr *mr;
6228         struct qlnxr_pd *pd;
6229         qlnx_host_t     *ha;
6230         size_t          num_pages = 0;
6231         uint64_t        length;
6232
6233         ha = dev->ha;
6234
6235         QL_DPRINT12(ha, "enter\n");
6236
6237         pd = get_qlnxr_pd(ibpd);
6238
6239         QL_DPRINT12(ha, "pd = %d buf_list = %p, buf_cnt = %d,"
6240                 " iova_start = %p, acc = %d\n",
6241                 pd->pd_id, buf_list, buf_cnt, iova_start, acc);
6242
6243         //if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
6244         //      QL_DPRINT11(ha, "(acc & IB_ACCESS_REMOTE_WRITE &&"
6245         //              " !(acc & IB_ACCESS_LOCAL_WRITE))\n");
6246         //      return ERR_PTR(-EINVAL);
6247         //}
6248
6249         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
6250         if (!mr) {
6251                 QL_DPRINT11(ha, "kzalloc(mr) failed\n");
6252                 return ERR_PTR(rc);
6253         }
6254
6255         mr->type = QLNXR_MR_KERNEL;
6256         mr->iova_start = iova_start;
6257
6258         rc = qlnxr_validate_phys_buf_list(ha, buf_list, buf_cnt, &length);
6259         if (rc)
6260                 goto err0;
6261
6262         num_pages = qlnxr_get_num_pages(ha, buf_list, buf_cnt);
6263         if (!num_pages)
6264                 goto err0;
6265
6266         rc = qlnxr_init_mr_info(dev, &mr->info, num_pages, 1);
6267         if (rc) {
6268                 QL_DPRINT11(ha,
6269                         "qlnxr_init_mr_info failed [%d]\n", rc);
6270                 goto err1;
6271         }
6272
6273         qlnxr_populate_phys_mem_pbls(dev, buf_list, buf_cnt, mr->info.pbl_table,
6274                    &mr->info.pbl_info);
6275
6276         rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
6277
6278         if (rc) {
6279                 QL_DPRINT11(ha, "roce alloc tid returned an error %d\n", rc);
6280                 goto err1;
6281         }
6282
6283         /* itid is an index only, 18 bits long; lkey = itid << 8 | key */
6284         mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
6285         mr->hw_mr.key = 0;
6286         mr->hw_mr.pd = pd->pd_id;
6287         mr->hw_mr.local_read = 1;
6288         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
6289         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
6290         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
6291         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
6292         mr->hw_mr.mw_bind = false; /* TBD MW BIND */
6293         mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
6294         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
6295         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
6296         mr->hw_mr.page_size_log = ilog2(PAGE_SIZE); /* for the MR pages */
6297
6298         mr->hw_mr.fbo = 0;
6299
6300         mr->hw_mr.length = length;
6301         mr->hw_mr.vaddr = (uint64_t)iova_start;
6302         mr->hw_mr.zbva = false; /* TBD figure when this should be true */
6303         mr->hw_mr.phy_mr = false; /* Fast MR - True, Regular Register False */
6304         mr->hw_mr.dma_mr = false;
6305
6306         rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
6307         if (rc) {
6308                 QL_DPRINT11(ha, "roce register tid returned an error %d\n", rc);
6309                 goto err2;
6310         }
6311
6312         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
6313         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
6314                 mr->hw_mr.remote_atomic)
6315                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
6316
6317         QL_DPRINT12(ha, "lkey: %x\n", mr->ibmr.lkey);
6318
6319         return (&mr->ibmr);
6320
6321 err2:
6322         ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
6323 err1:
6324         qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
6325 err0:
6326         kfree(mr);
6327
6328         QL_DPRINT12(ha, "exit [%d]\n", rc);
6329         return (ERR_PTR(rc));
6330 }
6331
6332 #endif /* #if __FreeBSD_version >= 1102000 */
6333
6334 struct ib_ah *
6335 #if __FreeBSD_version >= 1102000
6336 qlnxr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
6337         struct ib_udata *udata)
6338 #else
6339 qlnxr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
6340 #endif /* #if __FreeBSD_version >= 1102000 */
6341 {
6342         struct qlnxr_dev *dev;
6343         qlnx_host_t     *ha;
6344         struct qlnxr_ah *ah;
6345
6346         dev = get_qlnxr_dev((ibpd->device));
6347         ha = dev->ha;
6348
6349         QL_DPRINT12(ha, "in create_ah\n");
6350
6351         ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
6352         if (!ah) {
6353                 QL_DPRINT12(ha, "no address handle can be allocated\n");
6354                 return ERR_PTR(-ENOMEM);
6355         }
6356         
6357         ah->attr = *attr;       
6358  
6359         return &ah->ibah;
6360 }
6361
6362 int
6363 qlnxr_destroy_ah(struct ib_ah *ibah)
6364 {
6365         struct qlnxr_dev *dev;
6366         qlnx_host_t     *ha;
6367         struct qlnxr_ah *ah = get_qlnxr_ah(ibah);
6368         
6369         dev = get_qlnxr_dev((ibah->device));
6370         ha = dev->ha;
6371
6372         QL_DPRINT12(ha, "in destroy_ah\n");
6373
6374         kfree(ah);
6375         return 0;
6376 }
6377
6378 int
6379 qlnxr_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
6380 {
6381         struct qlnxr_dev *dev;
6382         qlnx_host_t     *ha;
6383
6384         dev = get_qlnxr_dev((ibah->device));
6385         ha = dev->ha;
6386         QL_DPRINT12(ha, "Query AH not supported\n");
6387         return -EINVAL;
6388 }
6389
6390 int
6391 qlnxr_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
6392 {
6393         struct qlnxr_dev *dev;
6394         qlnx_host_t     *ha;
6395
6396         dev = get_qlnxr_dev((ibah->device));
6397         ha = dev->ha;
6398         QL_DPRINT12(ha, "Modify AH not supported\n");
6399         return -ENOSYS;
6400 }
6401
6402 #if __FreeBSD_version >= 1102000
6403 int
6404 qlnxr_process_mad(struct ib_device *ibdev,
6405                 int process_mad_flags,
6406                 u8 port_num,
6407                 const struct ib_wc *in_wc,
6408                 const struct ib_grh *in_grh,
6409                 const struct ib_mad_hdr *mad_hdr,
6410                 size_t in_mad_size,
6411                 struct ib_mad_hdr *out_mad,
6412                 size_t *out_mad_size,
6413                 u16 *out_mad_pkey_index)
6414
6415 #else
6416
6417 int
6418 qlnxr_process_mad(struct ib_device *ibdev,
6419                         int process_mad_flags,
6420                         u8 port_num,
6421                         struct ib_wc *in_wc,
6422                         struct ib_grh *in_grh,
6423                         struct ib_mad *in_mad,
6424                         struct ib_mad *out_mad)
6425
6426 #endif /* #if __FreeBSD_version >= 1102000 */
6427 {
6428         struct qlnxr_dev *dev;
6429         qlnx_host_t     *ha;
6430
6431         dev = get_qlnxr_dev(ibdev);
6432         ha = dev->ha;
6433         QL_DPRINT12(ha, "process mad not supported\n");
6434
6435         return -ENOSYS;
6436 //      QL_DPRINT12(ha, "qlnxr_process_mad in_mad %x %x %x %x %x %x %x %x\n",
6437 //               in_mad->mad_hdr.attr_id, in_mad->mad_hdr.base_version,
6438 //               in_mad->mad_hdr.attr_mod, in_mad->mad_hdr.class_specific,
6439 //               in_mad->mad_hdr.class_version, in_mad->mad_hdr.method,
6440 //               in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.status);
6441
6442 //      return IB_MAD_RESULT_SUCCESS;   
6443 }
6444
6445
6446 #if __FreeBSD_version >= 1102000
6447 int
6448 qlnxr_get_port_immutable(struct ib_device *ibdev, u8 port_num,
6449         struct ib_port_immutable *immutable)
6450 {
6451         struct qlnxr_dev        *dev;
6452         qlnx_host_t             *ha;
6453         struct ib_port_attr     attr;
6454         int                     err;
6455
6456         dev = get_qlnxr_dev(ibdev);
6457         ha = dev->ha;
6458
6459         QL_DPRINT12(ha, "enter\n");
6460
6461         err = qlnxr_query_port(ibdev, port_num, &attr);
6462         if (err)
6463                 return err;
6464
6465         if (QLNX_IS_IWARP(dev)) {
6466                 immutable->pkey_tbl_len = 1;
6467                 immutable->gid_tbl_len = 1;
6468                 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
6469                 immutable->max_mad_size = 0;
6470         } else {
6471                 immutable->pkey_tbl_len = attr.pkey_tbl_len;
6472                 immutable->gid_tbl_len = attr.gid_tbl_len;
6473                 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
6474                 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
6475         }
6476
6477         QL_DPRINT12(ha, "exit\n");
6478         return 0;
6479 }
6480 #endif /* #if __FreeBSD_version >= 1102000 */
6481
6482
6483 /***** iWARP related functions *************/
6484
6485
6486 static void
6487 qlnxr_iw_mpa_request(void *context,
6488         struct ecore_iwarp_cm_event_params *params)
6489 {
6490         struct qlnxr_iw_listener *listener = (struct qlnxr_iw_listener *)context;
6491         struct qlnxr_dev *dev = listener->dev;
6492         struct qlnxr_iw_ep *ep;
6493         struct iw_cm_event event;
6494         struct sockaddr_in *laddr;
6495         struct sockaddr_in *raddr;
6496         qlnx_host_t     *ha;
6497
6498         ha = dev->ha;
6499
6500         QL_DPRINT12(ha, "enter\n");
6501
6502         if (params->cm_info->ip_version != ECORE_TCP_IPV4) {
6503                 QL_DPRINT11(ha, "only IPv4 supported [0x%x]\n",
6504                         params->cm_info->ip_version);
6505                 return;
6506         }
6507  
6508         ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
6509
6510         if (!ep) {
6511                 QL_DPRINT11(ha, "kzalloc(ep) failed\n");
6512                 return;
6513         }
6514
6515         ep->dev = dev;
6516         ep->ecore_context = params->ep_context;
6517
6518         memset(&event, 0, sizeof(event));
6519
6520         event.event = IW_CM_EVENT_CONNECT_REQUEST;
6521         event.status = params->status;
6522
6523         laddr = (struct sockaddr_in *)&event.local_addr;
6524         raddr = (struct sockaddr_in *)&event.remote_addr;
6525
6526         laddr->sin_family = AF_INET;
6527         raddr->sin_family = AF_INET;
6528
6529         laddr->sin_port = htons(params->cm_info->local_port);
6530         raddr->sin_port = htons(params->cm_info->remote_port);
6531
6532         laddr->sin_addr.s_addr = htonl(params->cm_info->local_ip[0]);
6533         raddr->sin_addr.s_addr = htonl(params->cm_info->remote_ip[0]);
6534
6535         event.provider_data = (void *)ep;
6536         event.private_data = (void *)params->cm_info->private_data;
6537         event.private_data_len = (u8)params->cm_info->private_data_len;
6538
6539 #if __FreeBSD_version >= 1100000
6540         event.ord = params->cm_info->ord;
6541         event.ird = params->cm_info->ird;
6542 #endif /* #if __FreeBSD_version >= 1100000 */
6543
6544         listener->cm_id->event_handler(listener->cm_id, &event);
6545
6546         QL_DPRINT12(ha, "exit\n");
6547
6548         return;
6549 }
6550
6551 static void
6552 qlnxr_iw_issue_event(void *context,
6553          struct ecore_iwarp_cm_event_params *params,
6554          enum iw_cm_event_type event_type,
6555          char *str)
6556 {
6557         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6558         struct qlnxr_dev *dev = ep->dev;
6559         struct iw_cm_event event;
6560         qlnx_host_t     *ha;
6561
6562         ha = dev->ha;
6563
6564         QL_DPRINT12(ha, "enter\n");
6565
6566         memset(&event, 0, sizeof(event));
6567         event.status = params->status;
6568         event.event = event_type;
6569
6570         if (params->cm_info != NULL) {
6571 #if __FreeBSD_version >= 1100000
6572                 event.ird = params->cm_info->ird;
6573                 event.ord = params->cm_info->ord;
6574                 QL_DPRINT12(ha, "ord=[%d] \n", event.ord);
6575                 QL_DPRINT12(ha, "ird=[%d] \n", event.ird);
6576 #endif /* #if __FreeBSD_version >= 1100000 */
6577
6578                 event.private_data_len = params->cm_info->private_data_len;
6579                 event.private_data = (void *)params->cm_info->private_data;
6580                 QL_DPRINT12(ha, "private_data_len=[%d] \n",
6581                         event.private_data_len);
6582         }
6583
6584         QL_DPRINT12(ha, "event=[%d] %s\n", event.event, str);
6585         QL_DPRINT12(ha, "status=[%d] \n", event.status);
6586         
6587         if (ep) {
6588                 if (ep->cm_id)
6589                         ep->cm_id->event_handler(ep->cm_id, &event);
6590                 else
6591                         QL_DPRINT11(ha, "ep->cm_id == NULL \n");
6592         } else {
6593                 QL_DPRINT11(ha, "ep == NULL \n");
6594         }
6595
6596         QL_DPRINT12(ha, "exit\n");
6597
6598         return;
6599 }
6600
6601 static void
6602 qlnxr_iw_close_event(void *context,
6603          struct ecore_iwarp_cm_event_params *params)
6604 {
6605         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6606         struct qlnxr_dev *dev = ep->dev;
6607         qlnx_host_t     *ha;
6608
6609         ha = dev->ha;
6610
6611         QL_DPRINT12(ha, "enter\n");
6612  
6613         if (ep->cm_id) {
6614                 qlnxr_iw_issue_event(context,
6615                                     params,
6616                                     IW_CM_EVENT_CLOSE,
6617                                     "IW_CM_EVENT_CLOSE");
6618                 ep->cm_id->rem_ref(ep->cm_id);
6619                 ep->cm_id = NULL;
6620         }
6621
6622         QL_DPRINT12(ha, "exit\n");
6623
6624         return;
6625 }
6626
6627 #if __FreeBSD_version >= 1102000
6628
6629 static void
6630 qlnxr_iw_passive_complete(void *context,
6631         struct ecore_iwarp_cm_event_params *params)
6632 {
6633         struct qlnxr_iw_ep      *ep = (struct qlnxr_iw_ep *)context;
6634         struct qlnxr_dev        *dev = ep->dev;
6635         qlnx_host_t             *ha;
6636
6637         ha = dev->ha;
6638
6639         /* We will only reach the following state if MPA_REJECT was called on
6640          * passive. In this case there will be no associated QP.
6641          */
6642         if ((params->status == -ECONNREFUSED) && (ep->qp == NULL)) {
6643                 QL_DPRINT11(ha, "PASSIVE connection refused releasing ep...\n");
6644                 kfree(ep);
6645                 return;
6646         }
6647
6648         /* We always issue an established event; however, OFED does not look
6649          * at the event code for established, so if there was a failure we
6650          * follow with a close.
6651          */
6652         qlnxr_iw_issue_event(context,
6653                 params,
6654                 IW_CM_EVENT_ESTABLISHED,
6655                 "IW_CM_EVENT_ESTABLISHED");
6656
6657         if (params->status < 0) {
6658                 qlnxr_iw_close_event(context, params);
6659         }
6660
6661         return;
6662 }
6663
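/*
 * Disconnect handling is deferred to the dev->iwarp_wq workqueue.
 * qlnxr_iw_disconnect_event() takes an extra QP reference which the worker
 * drops after delivering IW_CM_EVENT_DISCONNECT and moving the QP to SQD
 * (graceful) or ERR (abortive), depending on the event status.
 */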
6664 struct qlnxr_discon_work {
6665         struct work_struct work;
6666         struct qlnxr_iw_ep *ep;
6667         enum ecore_iwarp_event_type event;
6668         int status;
6669 };
6670
6671 static void
6672 qlnxr_iw_disconnect_worker(struct work_struct *work)
6673 {
6674         struct qlnxr_discon_work *dwork =
6675                 container_of(work, struct qlnxr_discon_work, work);
6676         struct ecore_rdma_modify_qp_in_params qp_params = { 0 };
6677         struct qlnxr_iw_ep *ep = dwork->ep;
6678         struct qlnxr_dev *dev = ep->dev;
6679         struct qlnxr_qp *qp = ep->qp;
6680         struct iw_cm_event event;
6681
6682         if (qp->destroyed) {
6683                 kfree(dwork);
6684                 qlnxr_iw_qp_rem_ref(&qp->ibqp);
6685                 return;
6686         }
6687
6688         memset(&event, 0, sizeof(event));
6689         event.status = dwork->status;
6690         event.event = IW_CM_EVENT_DISCONNECT;
6691
6692         /* Success means a graceful disconnect was requested; modifying the QP to
6693          * SQD is translated to a graceful disconnect. Otherwise a reset is sent.
6694          */
6695         if (dwork->status)
6696                 qp_params.new_state = ECORE_ROCE_QP_STATE_ERR;
6697         else
6698                 qp_params.new_state = ECORE_ROCE_QP_STATE_SQD;
6699
6700         kfree(dwork);
6701
6702         if (ep->cm_id)
6703                 ep->cm_id->event_handler(ep->cm_id, &event);
6704
6705         SET_FIELD(qp_params.modify_flags,
6706                   ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
6707
6708         ecore_rdma_modify_qp(dev->rdma_ctx, qp->ecore_qp, &qp_params);
6709
6710         qlnxr_iw_qp_rem_ref(&qp->ibqp);
6711
6712         return;
6713 }
6714
6715 void
6716 qlnxr_iw_disconnect_event(void *context,
6717         struct ecore_iwarp_cm_event_params *params)
6718 {
6719         struct qlnxr_discon_work *work;
6720         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6721         struct qlnxr_dev *dev = ep->dev;
6722         struct qlnxr_qp *qp = ep->qp;
6723
6724         work = kzalloc(sizeof(*work), GFP_ATOMIC);
6725         if (!work)
6726                 return;
6727
6728         qlnxr_iw_qp_add_ref(&qp->ibqp);
6729         work->ep = ep;
6730         work->event = params->event;
6731         work->status = params->status;
6732
6733         INIT_WORK(&work->work, qlnxr_iw_disconnect_worker);
6734         queue_work(dev->iwarp_wq, &work->work);
6735
6736         return;
6737 }
6738
6739 #endif /* #if __FreeBSD_version >= 1102000 */
6740
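/*
 * Called on ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY: once the peer's MPA reply
 * arrives on the active side, send an RTR through ecore_iwarp_send_rtr().
 */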
6741 static int
6742 qlnxr_iw_mpa_reply(void *context,
6743         struct ecore_iwarp_cm_event_params *params)
6744 {
6745         struct qlnxr_iw_ep      *ep = (struct qlnxr_iw_ep *)context;
6746         struct qlnxr_dev        *dev = ep->dev;
6747         struct ecore_iwarp_send_rtr_in rtr_in;
6748         int                     rc;
6749         qlnx_host_t             *ha;
6750
6751         ha = dev->ha;
6752
6753         QL_DPRINT12(ha, "enter\n");
6754
6755         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6756                 return -EINVAL;
6757
6758         bzero(&rtr_in, sizeof(struct ecore_iwarp_send_rtr_in));
6759         rtr_in.ep_context = params->ep_context;
6760
6761         rc = ecore_iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
6762
6763         QL_DPRINT12(ha, "exit rc = %d\n", rc);
6764         return rc;
6765 }
6766
6767
6768 void
6769 qlnxr_iw_qp_event(void *context,
6770         struct ecore_iwarp_cm_event_params *params,
6771         enum ib_event_type ib_event,
6772         char *str)
6773 {
6774         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6775         struct qlnxr_dev *dev = ep->dev;
6776         struct ib_qp *ibqp = &(ep->qp->ibqp);
6777         struct ib_event event;
6778         qlnx_host_t     *ha;
6779
6780         ha = dev->ha;
6781
6782         QL_DPRINT12(ha,
6783                 "[context, event, str, event_handler] = [%p, 0x%x, %s, %p] enter\n",
6784                 context, params->event, str, ibqp->event_handler);
6785
6786         if (ibqp->event_handler) {
6787                 event.event = ib_event;
6788                 event.device = ibqp->device;
6789                 event.element.qp = ibqp;
6790                 ibqp->event_handler(&event, ibqp->qp_context);
6791         }
6792
6793         return;
6794 }
6795
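/*
 * Central dispatcher for ecore iWARP CM events: connection-management events
 * are translated into iw_cm events, and QP-level errors are reported through
 * qlnxr_iw_qp_event().
 */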
6796 int
6797 qlnxr_iw_event_handler(void *context,
6798         struct ecore_iwarp_cm_event_params *params)
6799 {
6800         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6801         struct qlnxr_dev *dev = ep->dev;
6802         qlnx_host_t     *ha;
6803
6804         ha = dev->ha;
6805
6806         QL_DPRINT12(ha, "[context, event] = [%p, 0x%x] "
6807                 "enter\n", context, params->event);
6808  
6809         switch (params->event) {
6810
6811         /* Passive side request received */
6812         case ECORE_IWARP_EVENT_MPA_REQUEST:
6813                 qlnxr_iw_mpa_request(context, params);
6814                 break;
6815
6816         case ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY:
6817                 qlnxr_iw_mpa_reply(context, params);
6818                 break;
6819
6820         /* Passive side established (ack on MPA response) */
6821         case ECORE_IWARP_EVENT_PASSIVE_COMPLETE:
6822
6823 #if __FreeBSD_version >= 1102000
6824
6825                 ep->during_connect = 0;
6826                 qlnxr_iw_passive_complete(context, params);
6827
6828 #else
6829                 qlnxr_iw_issue_event(context,
6830                                     params,
6831                                     IW_CM_EVENT_ESTABLISHED,
6832                                     "IW_CM_EVENT_ESTABLISHED");
6833 #endif /* #if __FreeBSD_version >= 1102000 */
6834                 break;
6835
6836         /* Active side reply received */
6837         case ECORE_IWARP_EVENT_ACTIVE_COMPLETE:
6838                 ep->during_connect = 0;
6839                 qlnxr_iw_issue_event(context,
6840                                     params,
6841                                     IW_CM_EVENT_CONNECT_REPLY,
6842                                     "IW_CM_EVENT_CONNECT_REPLY");
6843                 if (params->status < 0) {
6844                         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6845
6846                         ep->cm_id->rem_ref(ep->cm_id);
6847                         ep->cm_id = NULL;
6848                 }
6849                 break;
6850
6851         case ECORE_IWARP_EVENT_DISCONNECT:
6852
6853 #if __FreeBSD_version >= 1102000
6854                 qlnxr_iw_disconnect_event(context, params);
6855 #else
6856                 qlnxr_iw_issue_event(context,
6857                                     params,
6858                                     IW_CM_EVENT_DISCONNECT,
6859                                     "IW_CM_EVENT_DISCONNECT");
6860                 qlnxr_iw_close_event(context, params);
6861 #endif /* #if __FreeBSD_version >= 1102000 */
6862                 break;
6863
6864         case ECORE_IWARP_EVENT_CLOSE:
6865                 ep->during_connect = 0;
6866                 qlnxr_iw_close_event(context, params);
6867                 break;
6868
6869         case ECORE_IWARP_EVENT_RQ_EMPTY:
6870                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6871                                  "IWARP_EVENT_RQ_EMPTY");
6872                 break;
6873
6874         case ECORE_IWARP_EVENT_IRQ_FULL:
6875                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6876                                  "IWARP_EVENT_IRQ_FULL");
6877                 break;
6878
6879         case ECORE_IWARP_EVENT_LLP_TIMEOUT:
6880                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6881                                  "IWARP_EVENT_LLP_TIMEOUT");
6882                 break;
6883
6884         case ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
6885                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
6886                                  "IWARP_EVENT_REMOTE_PROTECTION_ERROR");
6887                 break;
6888
6889         case ECORE_IWARP_EVENT_CQ_OVERFLOW:
6890                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6891                                  "IWARP_EVENT_CQ_OVERFLOW");
6892                 break;
6893
6894         case ECORE_IWARP_EVENT_QP_CATASTROPHIC:
6895                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6896                                  "IWARP_EVENT_QP_CATASTROPHIC");
6897                 break;
6898
6899         case ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR:
6900                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
6901                                  "IWARP_EVENT_LOCAL_ACCESS_ERROR");
6902                 break;
6903
6904         case ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR:
6905                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6906                                  "IWARP_EVENT_REMOTE_OPERATION_ERROR");
6907                 break;
6908
6909         case ECORE_IWARP_EVENT_TERMINATE_RECEIVED:
6910                 QL_DPRINT12(ha, "Got terminate message"
6911                         " ECORE_IWARP_EVENT_TERMINATE_RECEIVED\n");
6912                 break;
6913
6914         default:
6915                 QL_DPRINT12(ha,
6916                         "Unknown event [0x%x] received \n", params->event);
6917                 break;
6918         }
6919
6920         QL_DPRINT12(ha, "[context, event] = [%p, 0x%x] "
6921                 "exit\n", context, params->event);
6922         return 0;
6923 }
6924
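/*
 * Resolve the destination IPv4 address to a MAC address via arpresolve();
 * the arpresolve() signature differs across the __FreeBSD_version ifdef below.
 */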
6925 static int
6926 qlnxr_addr4_resolve(struct qlnxr_dev *dev,
6927                               struct sockaddr_in *src_in,
6928                               struct sockaddr_in *dst_in,
6929                               u8 *dst_mac)
6930 {
6931         int rc;
6932
6933 #if __FreeBSD_version >= 1100000
6934         rc = arpresolve(dev->ha->ifp, 0, NULL, (struct sockaddr *)dst_in,
6935                         dst_mac, NULL, NULL);
6936 #else
6937         struct llentry *lle;
6938
6939         rc = arpresolve(dev->ha->ifp, NULL, NULL, (struct sockaddr *)dst_in,
6940                         dst_mac, &lle);
6941 #endif
6942
6943         QL_DPRINT12(dev->ha, "rc = %d "
6944                 "sa_len = 0x%x sa_family = 0x%x IP Address = %d.%d.%d.%d "
6945                 "Dest MAC %02x:%02x:%02x:%02x:%02x:%02x\n", rc,
6946                 dst_in->sin_len, dst_in->sin_family,
6947                 NIPQUAD((dst_in->sin_addr.s_addr)),
6948                 dst_mac[0], dst_mac[1], dst_mac[2],
6949                 dst_mac[3], dst_mac[4], dst_mac[5]);
6950
6951         return rc;
6952 }
6953
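/*
 * Active-side connect: look up the QP by qpn, resolve the peer MAC address,
 * fill in ecore_iwarp_connect_in (IPv4 only, MSS set to the interface MTU
 * minus 40, presumably for the IPv4 and TCP headers) and hand the request to
 * ecore_iwarp_connect().
 */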
6954 int
6955 qlnxr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
6956 {
6957         struct qlnxr_dev *dev;
6958         struct ecore_iwarp_connect_out out_params;
6959         struct ecore_iwarp_connect_in in_params;
6960         struct qlnxr_iw_ep *ep;
6961         struct qlnxr_qp *qp;
6962         struct sockaddr_in *laddr;
6963         struct sockaddr_in *raddr;
6964         int rc = 0;
6965         qlnx_host_t     *ha;
6966
6967         dev = get_qlnxr_dev((cm_id->device));
6968         ha = dev->ha;
6969
6970         QL_DPRINT12(ha, "[cm_id, conn_param] = [%p, %p] "
6971                 "enter \n", cm_id, conn_param);
6972
6973         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6974                 return -EINVAL;
6975
6976         qp = idr_find(&dev->qpidr, conn_param->qpn);
6977
6978         laddr = (struct sockaddr_in *)&cm_id->local_addr;
6979         raddr = (struct sockaddr_in *)&cm_id->remote_addr;
6980
6981         QL_DPRINT12(ha,
6982                 "local = [%d.%d.%d.%d, %d] remote = [%d.%d.%d.%d, %d]\n",
6983                 NIPQUAD((laddr->sin_addr.s_addr)), laddr->sin_port,
6984                 NIPQUAD((raddr->sin_addr.s_addr)), raddr->sin_port);
6985
6986         ep = kzalloc(sizeof(*ep), GFP_KERNEL);
6987         if (!ep) {
6988                 QL_DPRINT11(ha, "struct qlnxr_iw_ep "
6989                         "alloc memory failed\n");
6990                 return -ENOMEM;
6991         }
6992
6993         ep->dev = dev;
6994         ep->qp = qp;
6995         cm_id->add_ref(cm_id);
6996         ep->cm_id = cm_id;
6997
6998         memset(&in_params, 0, sizeof (struct ecore_iwarp_connect_in));
6999         memset(&out_params, 0, sizeof (struct ecore_iwarp_connect_out));
7000
7001         in_params.event_cb = qlnxr_iw_event_handler;
7002         in_params.cb_context = ep;
7003
7004         in_params.cm_info.ip_version = ECORE_TCP_IPV4;
7005
7006         in_params.cm_info.remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
7007         in_params.cm_info.local_ip[0] = ntohl(laddr->sin_addr.s_addr);
7008         in_params.cm_info.remote_port = ntohs(raddr->sin_port);
7009         in_params.cm_info.local_port = ntohs(laddr->sin_port);
7010         in_params.cm_info.vlan = 0;
7011         in_params.mss = dev->ha->ifp->if_mtu - 40;
7012
7013         QL_DPRINT12(ha, "remote_ip = [%d.%d.%d.%d] "
7014                 "local_ip = [%d.%d.%d.%d] remote_port = %d local_port = %d "
7015                 "vlan = %d\n",
7016                 NIPQUAD((in_params.cm_info.remote_ip[0])),
7017                 NIPQUAD((in_params.cm_info.local_ip[0])),
7018                 in_params.cm_info.remote_port, in_params.cm_info.local_port,
7019                 in_params.cm_info.vlan);
7020
7021         rc = qlnxr_addr4_resolve(dev, laddr, raddr, (u8 *)in_params.remote_mac_addr);
7022
7023         if (rc) {
7024                 QL_DPRINT11(ha, "qlnxr_addr4_resolve failed\n");
7025                 goto err;
7026         }
7027
7028         QL_DPRINT12(ha, "ord = %d ird=%d private_data=%p"
7029                 " private_data_len=%d rq_psn=%d\n",
7030                 conn_param->ord, conn_param->ird, conn_param->private_data,
7031                 conn_param->private_data_len, qp->rq_psn);
7032
7033         in_params.cm_info.ord = conn_param->ord;
7034         in_params.cm_info.ird = conn_param->ird;
7035         in_params.cm_info.private_data = conn_param->private_data;
7036         in_params.cm_info.private_data_len = conn_param->private_data_len;
7037         in_params.qp = qp->ecore_qp;
7038
7039         memcpy(in_params.local_mac_addr, dev->ha->primary_mac, ETH_ALEN);
7040
7041         rc = ecore_iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
7042
7043         if (rc) {
7044                 QL_DPRINT12(ha, "ecore_iwarp_connect failed\n");
7045                 goto err;
7046         }
7047
7048         QL_DPRINT12(ha, "exit\n");
7049
7050         return rc;
7051
7052 err:
7053         cm_id->rem_ref(cm_id);
7054         kfree(ep);
7055
7056         QL_DPRINT12(ha, "exit [%d]\n", rc);
7057         return rc;
7058 }
7059
7060 int
7061 qlnxr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
7062 {
7063         struct qlnxr_dev *dev;
7064         struct qlnxr_iw_listener *listener;
7065         struct ecore_iwarp_listen_in iparams;
7066         struct ecore_iwarp_listen_out oparams;
7067         struct sockaddr_in *laddr;
7068         qlnx_host_t     *ha;
7069         int rc;
7070
7071         dev = get_qlnxr_dev((cm_id->device));
7072         ha = dev->ha;
7073
7074         QL_DPRINT12(ha, "enter\n");
7075
7076         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
7077                 return -EINVAL;
7078
7079         laddr = (struct sockaddr_in *)&cm_id->local_addr;
7080
7081         listener = kzalloc(sizeof(*listener), GFP_KERNEL);
7082
7083         if (listener == NULL) {
7084                 QL_DPRINT11(ha, "listener memory alloc failed\n");
7085                 return -ENOMEM;
7086         }
7087
7088         listener->dev = dev;
7089         cm_id->add_ref(cm_id);
7090         listener->cm_id = cm_id;
7091         listener->backlog = backlog;
7092
7093         memset(&iparams, 0, sizeof (struct ecore_iwarp_listen_in));
7094         memset(&oparams, 0, sizeof (struct ecore_iwarp_listen_out));
7095
7096         iparams.cb_context = listener;
7097         iparams.event_cb = qlnxr_iw_event_handler;
7098         iparams.max_backlog = backlog;
7099
7100         iparams.ip_version = ECORE_TCP_IPV4;
7101
7102         iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
7103         iparams.port = ntohs(laddr->sin_port);
7104         iparams.vlan = 0;
7105
7106         QL_DPRINT12(ha, "[%d.%d.%d.%d, %d] iparams.port = %d\n",
7107                 NIPQUAD((laddr->sin_addr.s_addr)),
7108                 laddr->sin_port, iparams.port);
7109
7110         rc = ecore_iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
7111         if (rc) {
7112                 QL_DPRINT11(ha,
7113                         "ecore_iwarp_create_listen failed rc = %d\n", rc);
7114                 goto err;
7115         }
7116
7117         listener->ecore_handle = oparams.handle;
7118         cm_id->provider_data = listener;
7119
7120         QL_DPRINT12(ha, "exit\n");
7121         return rc;
7122
7123 err:
7124         cm_id->rem_ref(cm_id);
7125         kfree(listener);
7126
7127         QL_DPRINT12(ha, "exit [%d]\n", rc);
7128         return rc;
7129 }
7130
7131 void
7132 qlnxr_iw_destroy_listen(struct iw_cm_id *cm_id)
7133 {
7134         struct qlnxr_iw_listener *listener = cm_id->provider_data;
7135         struct qlnxr_dev *dev = get_qlnxr_dev((cm_id->device));
7136         int rc = 0;
7137         qlnx_host_t     *ha;
7138
7139         ha = dev->ha;
7140
7141         QL_DPRINT12(ha, "enter\n");
7142
7143         if (listener->ecore_handle)
7144                 rc = ecore_iwarp_destroy_listen(dev->rdma_ctx,
7145                                 listener->ecore_handle);
7146
7147         cm_id->rem_ref(cm_id);
7148
7149         QL_DPRINT12(ha, "exit [%d]\n", rc);
7150         return;
7151 }
7152
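/*
 * Passive-side accept: look up the QP by qpn, bind it to the endpoint that
 * carried the MPA request and complete the connection through
 * ecore_iwarp_accept() with the caller-supplied private data and IRD/ORD.
 */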
7153 int
7154 qlnxr_iw_accept(struct iw_cm_id *cm_id,
7155         struct iw_cm_conn_param *conn_param)
7156 {
7157         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)cm_id->provider_data;
7158         struct qlnxr_dev *dev = ep->dev;
7159         struct qlnxr_qp *qp;
7160         struct ecore_iwarp_accept_in params;
7161         int rc;
7162         qlnx_host_t     *ha;
7163
7164         ha = dev->ha;
7165
7166         QL_DPRINT12(ha, "enter  qpid=%d\n", conn_param->qpn);
7167
7168         if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
7169                 return -EINVAL;
7170  
7171         qp = idr_find(&dev->qpidr, conn_param->qpn);
7172         if (!qp) {
7173                 QL_DPRINT11(ha, "idr_find failed invalid qpn = %d\n",
7174                         conn_param->qpn);
7175                 return -EINVAL;
7176         }
7177         ep->qp = qp;
7178         qp->ep = ep;
7179         cm_id->add_ref(cm_id);
7180         ep->cm_id = cm_id;
7181
7182         params.ep_context = ep->ecore_context;
7183         params.cb_context = ep;
7184         params.qp = ep->qp->ecore_qp;
7185         params.private_data = conn_param->private_data;
7186         params.private_data_len = conn_param->private_data_len;
7187         params.ird = conn_param->ird;
7188         params.ord = conn_param->ord;
7189
7190         rc = ecore_iwarp_accept(dev->rdma_ctx, &params);
7191         if (rc) {
7192                 QL_DPRINT11(ha, "ecore_iwarp_accept failed %d\n", rc);
7193                 goto err;
7194         }
7195
7196         QL_DPRINT12(ha, "exit\n");
7197         return 0;
7198 err:
7199         cm_id->rem_ref(cm_id);
7200         QL_DPRINT12(ha, "exit rc = %d\n", rc);
7201         return rc;
7202 }
7203
7204 int
7205 qlnxr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
7206 {
7207 #if __FreeBSD_version >= 1102000
7208
7209         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)cm_id->provider_data;
7210         struct qlnxr_dev *dev = ep->dev;
7211         struct ecore_iwarp_reject_in params;
7212         int rc;
7213
7214         params.ep_context = ep->ecore_context;
7215         params.cb_context = ep;
7216         params.private_data = pdata;
7217         params.private_data_len = pdata_len;
7218         ep->qp = NULL;
7219
7220         rc = ecore_iwarp_reject(dev->rdma_ctx, &params);
7221
7222         return rc;
7223
7224 #else
7225
7226         printf("iWARP reject_cr not implemented\n");
7227         return -EINVAL;
7228
7229 #endif /* #if __FreeBSD_version >= 1102000 */
7230 }
7231
7232 void
7233 qlnxr_iw_qp_add_ref(struct ib_qp *ibqp)
7234 {
7235         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
7236         qlnx_host_t     *ha;
7237
7238         ha = qp->dev->ha;
7239
7240         QL_DPRINT12(ha, "enter ibqp = %p\n", ibqp);
7241  
7242         atomic_inc(&qp->refcnt);
7243
7244         QL_DPRINT12(ha, "exit \n");
7245         return;
7246 }
7247
7248 void
7249 qlnxr_iw_qp_rem_ref(struct ib_qp *ibqp)
7250 {
7251         struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
7252         qlnx_host_t     *ha;
7253
7254         ha = qp->dev->ha;
7255
7256         QL_DPRINT12(ha, "enter ibqp = %p qp = %p\n", ibqp, qp);
7257
7258         if (atomic_dec_and_test(&qp->refcnt)) {
7259                 qlnxr_idr_remove(qp->dev, qp->qp_id);
7260                 kfree(qp);
7261         }
7262
7263         QL_DPRINT12(ha, "exit \n");
7264         return;
7265 }
7266
7267 struct ib_qp *
7268 qlnxr_iw_get_qp(struct ib_device *ibdev, int qpn)
7269 {
7270         struct qlnxr_dev *dev = get_qlnxr_dev(ibdev);
7271         struct ib_qp *qp;
7272         qlnx_host_t     *ha;
7273
7274         ha = dev->ha;
7275
7276         QL_DPRINT12(ha, "enter dev = %p ibdev = %p qpn = %d\n", dev, ibdev, qpn);
7277
7278         qp = idr_find(&dev->qpidr, qpn);
7279
7280         QL_DPRINT12(ha, "exit qp = %p\n", qp);
7281
7282         return (qp);
7283 }