/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pciio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/ioccom.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/idr.h>
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>

#include <cxgb_include.h>
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
#include <ulp/iw_cxgb/iw_cxgb.h>
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
#include <ulp/iw_cxgb/iw_cxgb_user.h>
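
/*
 * iWARP verbs provider for Chelsio T3 adapters: implements the ib_device
 * methods (ucontext, PD, MR, MW, CQ, and QP management) on top of the
 * cxio HAL, plus the iw_cm callbacks used by the RDMA CM.
 */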
static int
iwch_modify_port(struct ib_device *ibdev,
    u8 port, int port_modify_mask,
    struct ib_port_modify *props)
{
    return (-ENOSYS);
}
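
/*
 * Address handles, multicast, and MADs are InfiniBand-only constructs;
 * an iWARP device has no equivalent, so the verbs below are stubs that
 * fail with ENOSYS.
 */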
static struct ib_ah *
iwch_ah_create(struct ib_pd *pd,
    struct ib_ah_attr *ah_attr)
{
    return ERR_PTR(-ENOSYS);
}
static int
iwch_ah_destroy(struct ib_ah *ah)
{
    return (-ENOSYS);
}
static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
    return (-ENOSYS);
}

static int
iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
    return (-ENOSYS);
}
static int
iwch_process_mad(struct ib_device *ibdev,
    int mad_flags,
    u8 port_num,
    struct ib_wc *in_wc,
    struct ib_grh *in_grh,
    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
    return (-ENOSYS);
}
static int
iwch_dealloc_ucontext(struct ib_ucontext *context)
{
    struct iwch_dev *rhp = to_iwch_dev(context->device);
    struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
    struct iwch_mm_entry *mm, *tmp;

    CTR2(KTR_IW_CXGB, "%s context %p", __FUNCTION__, context);
    TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
        TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
        cxfree(mm);
    }
    cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
    cxfree(ucontext);
    return 0;
}
static struct ib_ucontext *
iwch_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
    struct iwch_ucontext *context;
    struct iwch_dev *rhp = to_iwch_dev(ibdev);

    CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
    context = malloc(sizeof(*context), M_DEVBUF, M_ZERO|M_NOWAIT);
    if (!context)
        return ERR_PTR(-ENOMEM);
    cxio_init_ucontext(&rhp->rdev, &context->uctx);
    TAILQ_INIT(&context->mmaps);
    mtx_init(&context->mmap_lock, "ucontext mmap", NULL, MTX_DEF);
    return &context->ibucontext;
}
static int
iwch_destroy_cq(struct ib_cq *ib_cq)
{
    struct iwch_cq *chp;

    CTR2(KTR_IW_CXGB, "%s ib_cq %p", __FUNCTION__, ib_cq);
    chp = to_iwch_cq(ib_cq);

    remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
    mtx_lock(&chp->lock);
    if (--chp->refcnt)
        msleep(chp, &chp->lock, 0, "iwch_destroy_cq", 0);
    mtx_unlock(&chp->lock);

    cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
    cxfree(chp);
    return 0;
}
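
/*
 * CQ creation. For user CQs, the response carries an mmap "key": a
 * per-ucontext, page-aligned cookie that libcxgb3 passes back through
 * mmap(2); insert_mmap() records the key -> physical-address translation
 * so the mapping request can later be resolved.
 */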
static struct ib_cq *
iwch_create_cq(struct ib_device *ibdev, struct ib_cq_init_attr *attr,
    struct ib_ucontext *ib_context,
    struct ib_udata *udata)
{
    struct iwch_dev *rhp;
    struct iwch_cq *chp;
    struct iwch_create_cq_resp uresp;
    struct iwch_create_cq_req ureq;
    struct iwch_ucontext *ucontext = NULL;
    static int warned;
    size_t resplen;
    int entries = attr->cqe;

    CTR3(KTR_IW_CXGB, "%s ib_dev %p entries %d", __FUNCTION__, ibdev, entries);
    rhp = to_iwch_dev(ibdev);
    chp = malloc(sizeof(*chp), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (!chp)
        return ERR_PTR(-ENOMEM);

    if (ib_context) {
        ucontext = to_iwch_ucontext(ib_context);
        if (!t3a_device(rhp)) {
            if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
                cxfree(chp);
                return ERR_PTR(-EFAULT);
            }
            chp->user_rptr_addr = (u32 /*__user */*)(unsigned long)ureq.user_rptr_addr;
        }
    }

    if (t3a_device(rhp)) {

        /*
         * T3A: Add some fluff to handle extra CQEs inserted
         * for various errors.
         * Additional CQE possibilities:
         *      TERMINATE,
         *      incoming RDMA WRITE Failures
         *      incoming RDMA READ REQUEST FAILUREs
         * NOTE: We cannot ensure the CQ won't overflow.
         */
        entries += 16;
    }
    entries = roundup_pow_of_two(entries);
    chp->cq.size_log2 = ilog2(entries);

    if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
        cxfree(chp);
        return ERR_PTR(-ENOMEM);
    }
    chp->rhp = rhp;
    chp->ibcq.cqe = 1 << chp->cq.size_log2;
    mtx_init(&chp->lock, "cxgb cq", NULL, MTX_DEF|MTX_DUPOK);
    chp->refcnt = 1;
    if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
        cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
        cxfree(chp);
        return ERR_PTR(-ENOMEM);
    }

    if (ucontext) {
        struct iwch_mm_entry *mm;

        mm = kmalloc(sizeof *mm, M_NOWAIT);
        if (!mm) {
            iwch_destroy_cq(&chp->ibcq);
            return ERR_PTR(-ENOMEM);
        }
        uresp.cqid = chp->cq.cqid;
        uresp.size_log2 = chp->cq.size_log2;
        mtx_lock(&ucontext->mmap_lock);
        uresp.key = ucontext->key;
        ucontext->key += PAGE_SIZE;
        mtx_unlock(&ucontext->mmap_lock);
        mm->key = uresp.key;
        mm->addr = vtophys(chp->cq.queue);
        if (udata->outlen < sizeof uresp) {
            if (!warned++)
                CTR1(KTR_IW_CXGB, "%s Warning - "
                    "downlevel libcxgb3 (non-fatal).\n",
                    __FUNCTION__);
            mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
                sizeof(struct t3_cqe));
            resplen = sizeof(struct iwch_create_cq_resp_v0);
        } else {
            mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
                sizeof(struct t3_cqe));
            uresp.memsize = mm->len;
            resplen = sizeof uresp;
        }
        if (ib_copy_to_udata(udata, &uresp, resplen)) {
            cxfree(mm);
            iwch_destroy_cq(&chp->ibcq);
            return ERR_PTR(-EFAULT);
        }
        insert_mmap(ucontext, mm);
    }
    CTR4(KTR_IW_CXGB, "created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx",
        chp->cq.cqid, chp, (1 << chp->cq.size_log2),
        (unsigned long long) chp->cq.dma_addr);
    return &chp->ibcq;
}
static int
iwch_resize_cq(struct ib_cq *cq __unused, int cqe __unused,
    struct ib_udata *udata __unused)
{
    return (-ENOSYS);
}
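
/*
 * Re-arm the CQ for a notification: CQ_ARM_SE requests an event for
 * solicited completions only, CQ_ARM_AN for any completion. For user
 * CQs the consumer's read pointer lives in user memory, so it is
 * copied in first and the hardware is armed against that index.
 */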
static int
iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
    struct iwch_dev *rhp;
    struct iwch_cq *chp;
    enum t3_cq_opcode cq_op;
    int err;
    u32 rptr;

    chp = to_iwch_cq(ibcq);
    rhp = chp->rhp;
    if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
        cq_op = CQ_ARM_SE;
    else
        cq_op = CQ_ARM_AN;
    if (chp->user_rptr_addr) {
        if (copyin(chp->user_rptr_addr, &rptr, 4))
            return (-EFAULT);
        mtx_lock(&chp->lock);
        chp->cq.rptr = rptr;
    } else
        mtx_lock(&chp->lock);
    CTR2(KTR_IW_CXGB, "%s rptr 0x%x", __FUNCTION__, chp->cq.rptr);
    err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
    mtx_unlock(&chp->lock);
    if (err < 0)
        log(LOG_ERR, "Error %d rearming CQID 0x%x\n", err,
            chp->cq.cqid);
    if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
        err = 0;
    return err;
}
static int
iwch_mmap(struct ib_ucontext *context __unused, struct vm_area_struct *vma __unused)
{
    return (-ENOSYS);
}
static int iwch_deallocate_pd(struct ib_pd *pd)
{
    struct iwch_dev *rhp;
    struct iwch_pd *php;

    php = to_iwch_pd(pd);
    rhp = php->rhp;
    CTR3(KTR_IW_CXGB, "%s ibpd %p pdid 0x%x", __FUNCTION__, pd, php->pdid);
    cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
    cxfree(php);
    return 0;
}
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
    struct ib_ucontext *context,
    struct ib_udata *udata)
{
    struct iwch_pd *php;
    u32 pdid;
    struct iwch_dev *rhp;

    CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
    rhp = (struct iwch_dev *) ibdev;
    pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
    if (!pdid)
        return ERR_PTR(-EINVAL);
    php = malloc(sizeof(*php), M_DEVBUF, M_ZERO|M_NOWAIT);
    if (!php) {
        cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
        return ERR_PTR(-ENOMEM);
    }
    php->pdid = pdid;
    php->rhp = rhp;
    if (context) {
        if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
            iwch_deallocate_pd(&php->ibpd);
            return ERR_PTR(-EFAULT);
        }
    }
    CTR3(KTR_IW_CXGB, "%s pdid 0x%0x ptr 0x%p", __FUNCTION__, pdid, php);
    return &php->ibpd;
}
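
/*
 * A T3 STag is a 24-bit index plus an 8-bit key; the memory-management
 * ID used for the driver's handle table is simply stag >> 8.
 */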
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
    struct iwch_dev *rhp;
    struct iwch_mr *mhp;
    u32 mmid;

    CTR2(KTR_IW_CXGB, "%s ib_mr %p", __FUNCTION__, ib_mr);
    /* There can be no memory windows */
    if (atomic_load_acq_int(&ib_mr->usecnt.counter))
        return (-EINVAL);

    mhp = to_iwch_mr(ib_mr);
    rhp = mhp->rhp;
    mmid = mhp->attr.stag >> 8;
    cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
        mhp->attr.pbl_addr);
    iwch_free_pbl(mhp);
    remove_handle(rhp, &rhp->mmidr, mmid);
    if (mhp->kva)
        cxfree((void *) (unsigned long) mhp->kva);
    if (mhp->umem)
        ib_umem_release(mhp->umem);
    CTR3(KTR_IW_CXGB, "%s mmid 0x%x ptr %p", __FUNCTION__, mmid, mhp);
    cxfree(mhp);
    return 0;
}
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
    struct ib_phys_buf *buffer_list,
    int num_phys_buf,
    int acc,
    u64 *iova_start)
{
    __be64 *page_list;
    int shift, npages, ret;
    u64 total_size;
    struct iwch_dev *rhp;
    struct iwch_pd *php;
    struct iwch_mr *mhp;

    CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
    php = to_iwch_pd(pd);
    rhp = php->rhp;

    mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
    if (!mhp)
        return ERR_PTR(-ENOMEM);
    mhp->rhp = rhp;

    /* First check that we have enough alignment */
    if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
        ret = EINVAL;
        goto err;
    }
    if (num_phys_buf > 1 &&
        ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
        ret = EINVAL;
        goto err;
    }

    ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
        &total_size, &npages, &shift, &page_list);
    if (ret)
        goto err;

    ret = iwch_alloc_pbl(mhp, npages);
    if (ret) {
        cxfree(page_list);
        goto err;
    }

    ret = iwch_write_pbl(mhp, page_list, npages, 0);
    cxfree(page_list);
    if (ret)
        goto err_pbl;

    mhp->attr.pdid = php->pdid;
    mhp->attr.zbva = 0;
    mhp->attr.perms = iwch_ib_to_tpt_access(acc);
    mhp->attr.va_fbo = *iova_start;
    mhp->attr.page_size = shift - 12;
    mhp->attr.len = (u32) total_size;
    mhp->attr.pbl_size = npages;
    ret = iwch_register_mem(rhp, php, mhp, shift);
    if (ret)
        goto err_pbl;

    return &mhp->ibmr;

err_pbl:
    iwch_free_pbl(mhp);
err:
    cxfree(mhp);
    return ERR_PTR(-ret);
}
static int iwch_reregister_phys_mem(struct ib_mr *mr,
    int mr_rereg_mask,
    struct ib_pd *pd,
    struct ib_phys_buf *buffer_list,
    int num_phys_buf,
    int acc, u64 * iova_start)
{
    struct iwch_mr mh, *mhp;
    struct iwch_pd *php;
    struct iwch_dev *rhp;
    __be64 *page_list = NULL;
    int shift = 0;
    u64 total_size;
    int npages = 0;
    int ret;

    CTR3(KTR_IW_CXGB, "%s ib_mr %p ib_pd %p", __FUNCTION__, mr, pd);

    /* There can be no memory windows */
    if (atomic_load_acq_int(&mr->usecnt.counter))
        return (-EINVAL);

    mhp = to_iwch_mr(mr);
    rhp = mhp->rhp;
    php = to_iwch_pd(mr->pd);

    /* make sure we are on the same adapter */
    if (rhp != php->rhp)
        return (-EINVAL);

    memcpy(&mh, mhp, sizeof *mhp);

    if (mr_rereg_mask & IB_MR_REREG_PD)
        php = to_iwch_pd(pd);
    if (mr_rereg_mask & IB_MR_REREG_ACCESS)
        mh.attr.perms = iwch_ib_to_tpt_access(acc);
    if (mr_rereg_mask & IB_MR_REREG_TRANS) {
        ret = build_phys_page_list(buffer_list, num_phys_buf,
            iova_start,
            &total_size, &npages,
            &shift, &page_list);
        if (ret)
            return (-ret);
    }

    ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
    cxfree(page_list);
    if (ret)
        return (-ret);

    if (mr_rereg_mask & IB_MR_REREG_PD)
        mhp->attr.pdid = php->pdid;
    if (mr_rereg_mask & IB_MR_REREG_ACCESS)
        mhp->attr.perms = iwch_ib_to_tpt_access(acc);
    if (mr_rereg_mask & IB_MR_REREG_TRANS) {
        mhp->attr.zbva = 0;
        mhp->attr.va_fbo = *iova_start;
        mhp->attr.page_size = shift - 12;
        mhp->attr.len = (u32) total_size;
        mhp->attr.pbl_size = npages;
    }

    return 0;
}
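
/*
 * Register a user memory region: pin the pages via ib_umem_get(), then
 * stream their DMA addresses into the adapter's page-block list (PBL),
 * flushing one PAGE_SIZE batch of entries at a time.
 */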
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 virt, int acc, struct ib_udata *udata,
    int mr_id)
{
    __be64 *pages;
    int shift, n, len;
    int i, k, entry;
    int err = 0;
    struct iwch_dev *rhp;
    struct iwch_pd *php;
    struct iwch_mr *mhp;
    struct iwch_reg_user_mr_resp uresp;
    struct scatterlist *sg;

    CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

    php = to_iwch_pd(pd);
    rhp = php->rhp;
    mhp = malloc(sizeof(*mhp), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (!mhp)
        return ERR_PTR(-ENOMEM);
    mhp->rhp = rhp;

    mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
    if (IS_ERR(mhp->umem)) {
        err = -PTR_ERR(mhp->umem);
        cxfree(mhp);
        return ERR_PTR(-err);
    }

    shift = ffs(mhp->umem->page_size) - 1;

    n = mhp->umem->nmap;

    err = iwch_alloc_pbl(mhp, n);
    if (err)
        goto err;

    pages = (__be64 *) kmalloc(n * sizeof(u64), M_NOWAIT);
    if (!pages) {
        err = ENOMEM;
        goto err_pbl;
    }

    i = n = 0;

    for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
        len = sg_dma_len(sg) >> shift;
        for (k = 0; k < len; ++k) {
            pages[i++] = cpu_to_be64(sg_dma_address(sg) +
                mhp->umem->page_size * k);
            if (i == PAGE_SIZE / sizeof *pages) {
                err = iwch_write_pbl(mhp, pages, i, n);
                if (err)
                    goto pbl_done;
                n += i;
                i = 0;
            }
        }
    }
    if (i)
        err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
    cxfree(pages);
    if (err)
        goto err_pbl;

    mhp->attr.pdid = php->pdid;
    mhp->attr.zbva = 0;
    mhp->attr.perms = iwch_ib_to_tpt_access(acc);
    mhp->attr.va_fbo = virt;
    mhp->attr.page_size = shift - 12;
    mhp->attr.len = (u32) length;

    err = iwch_register_mem(rhp, php, mhp, shift);
    if (err)
        goto err_pbl;

    if (udata && !t3a_device(rhp)) {
        uresp.pbl_addr = (mhp->attr.pbl_addr -
            rhp->rdev.rnic_info.pbl_base) >> 3;
        CTR2(KTR_IW_CXGB, "%s user resp pbl_addr 0x%x", __FUNCTION__,
            uresp.pbl_addr);
        if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
            /* iwch_dereg_mr() releases the umem and frees mhp */
            iwch_dereg_mr(&mhp->ibmr);
            return ERR_PTR(-EFAULT);
        }
    }

    return &mhp->ibmr;

err_pbl:
    iwch_free_pbl(mhp);
err:
    ib_umem_release(mhp->umem);
    cxfree(mhp);
    return ERR_PTR(-err);
}
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
    struct ib_phys_buf bl;
    u64 kva;
    struct ib_mr *ibmr;

    CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

    /*
     * T3 only supports 32 bits of size.
     */
    bl.size = 0xffffffff;
    bl.addr = 0;
    kva = 0;
    ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
    return ibmr;
}
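
/*
 * Allocate a memory window. The window receives its own STag; binding
 * it to a registered MR happens later through iwch_bind_mw().
 */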
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
    struct iwch_dev *rhp;
    struct iwch_pd *php;
    struct iwch_mw *mhp;
    u32 mmid;
    u32 stag = 0;
    int ret;

    php = to_iwch_pd(pd);
    rhp = php->rhp;
    mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
    if (!mhp)
        return ERR_PTR(-ENOMEM);
    ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
    if (ret) {
        cxfree(mhp);
        return ERR_PTR(-ret);
    }
    mhp->rhp = rhp;
    mhp->attr.pdid = php->pdid;
    mhp->attr.type = TPT_MW;
    mhp->attr.stag = stag;
    mmid = (stag) >> 8;
    mhp->ibmw.rkey = stag;
    if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
        cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
        cxfree(mhp);
        return ERR_PTR(-ENOMEM);
    }
    CTR4(KTR_IW_CXGB, "%s mmid 0x%x mhp %p stag 0x%x", __FUNCTION__, mmid, mhp, stag);
    return &(mhp->ibmw);
}
static int iwch_dealloc_mw(struct ib_mw *mw)
{
    struct iwch_dev *rhp;
    struct iwch_mw *mhp;
    u32 mmid;

    mhp = to_iwch_mw(mw);
    rhp = mhp->rhp;
    mmid = (mw->rkey) >> 8;
    cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
    remove_handle(rhp, &rhp->mmidr, mmid);
    CTR4(KTR_IW_CXGB, "%s ib_mw %p mmid 0x%x ptr %p", __FUNCTION__, mw, mmid, mhp);
    cxfree(mhp);
    return 0;
}
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
    struct iwch_dev *rhp;
    struct iwch_qp *qhp;
    struct iwch_qp_attributes attrs;
    struct iwch_ucontext *ucontext;

    qhp = to_iwch_qp(ib_qp);
    rhp = qhp->rhp;

    attrs.next_state = IWCH_QP_STATE_ERROR;
    iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
    mtx_lock(&qhp->lock);
    if (qhp->ep)
        msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp1", 0);
    mtx_unlock(&qhp->lock);

    remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

    mtx_lock(&qhp->lock);
    if (--qhp->refcnt)
        msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp2", 0);
    mtx_unlock(&qhp->lock);

    ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
        : NULL;
    cxio_destroy_qp(&rhp->rdev, &qhp->wq,
        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

    CTR4(KTR_IW_CXGB, "%s ib_qp %p qpid 0x%0x qhp %p", __FUNCTION__,
        ib_qp, qhp->wq.qpid, qhp);
    cxfree(qhp);
    return 0;
}
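
/*
 * Create an RC QP. The RQ depth must be (entries + 1) rounded up to a
 * power of two, and for user QPs two mmap keys are published: one for
 * the work queue itself and one for the doorbell page.
 */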
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
    struct ib_qp_init_attr *attrs,
    struct ib_udata *udata)
{
    struct iwch_dev *rhp;
    struct iwch_qp *qhp;
    struct iwch_pd *php;
    struct iwch_cq *schp;
    struct iwch_cq *rchp;
    struct iwch_create_qp_resp uresp;
    int wqsize, sqsize, rqsize;
    struct iwch_ucontext *ucontext;

    CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
    if (attrs->qp_type != IB_QPT_RC)
        return ERR_PTR(-EINVAL);
    php = to_iwch_pd(pd);
    rhp = php->rhp;
    schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
    rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
    if (!schp || !rchp)
        return ERR_PTR(-EINVAL);

    /* The RQT size must be # of entries + 1 rounded up to a power of two */
    rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
    if (rqsize == attrs->cap.max_recv_wr)
        rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

    /* T3 doesn't support RQT depth < 16 */
    if (rqsize < 16)
        rqsize = 16;

    if (rqsize > T3_MAX_RQ_SIZE)
        return ERR_PTR(-EINVAL);

    if (attrs->cap.max_inline_data > T3_MAX_INLINE)
        return ERR_PTR(-EINVAL);

    /*
     * NOTE: The SQ and total WQ sizes don't need to be
     * a power of two. However, all the code assumes
     * they are. EG: Q_FREECNT() and friends.
     */
    sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
    wqsize = roundup_pow_of_two(rqsize + sqsize);
    CTR4(KTR_IW_CXGB, "%s wqsize %d sqsize %d rqsize %d", __FUNCTION__,
        wqsize, sqsize, rqsize);
    qhp = malloc(sizeof(*qhp), M_DEVBUF, M_ZERO|M_NOWAIT);
    if (!qhp)
        return ERR_PTR(-ENOMEM);
    qhp->wq.size_log2 = ilog2(wqsize);
    qhp->wq.rq_size_log2 = ilog2(rqsize);
    qhp->wq.sq_size_log2 = ilog2(sqsize);
    ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
    if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
        ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
        cxfree(qhp);
        return ERR_PTR(-ENOMEM);
    }

    attrs->cap.max_recv_wr = rqsize - 1;
    attrs->cap.max_send_wr = sqsize;
    attrs->cap.max_inline_data = T3_MAX_INLINE;

    qhp->rhp = rhp;
    qhp->attr.pd = php->pdid;
    qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
    qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
    qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
    qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
    qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
    qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
    qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
    qhp->attr.state = IWCH_QP_STATE_IDLE;
    qhp->attr.next_state = IWCH_QP_STATE_IDLE;

    /*
     * XXX - These don't get passed in from the openib user
     * at create time. The CM sets them via a QP modify.
     * Need to fix... I think the CM should pass them in.
     */
    qhp->attr.enable_rdma_read = 1;
    qhp->attr.enable_rdma_write = 1;
    qhp->attr.enable_bind = 1;
    qhp->attr.max_ord = 1;
    qhp->attr.max_ird = 1;

    mtx_init(&qhp->lock, "cxgb qp", NULL, MTX_DEF|MTX_DUPOK);
    qhp->refcnt = 1;

    if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
        cxio_destroy_qp(&rhp->rdev, &qhp->wq,
            ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        cxfree(qhp);
        return ERR_PTR(-ENOMEM);
    }

    if (udata) {

        struct iwch_mm_entry *mm1, *mm2;

        mm1 = kmalloc(sizeof *mm1, M_NOWAIT);
        if (!mm1) {
            iwch_destroy_qp(&qhp->ibqp);
            return ERR_PTR(-ENOMEM);
        }

        mm2 = kmalloc(sizeof *mm2, M_NOWAIT);
        if (!mm2) {
            cxfree(mm1);
            iwch_destroy_qp(&qhp->ibqp);
            return ERR_PTR(-ENOMEM);
        }

        uresp.qpid = qhp->wq.qpid;
        uresp.size_log2 = qhp->wq.size_log2;
        uresp.sq_size_log2 = qhp->wq.sq_size_log2;
        uresp.rq_size_log2 = qhp->wq.rq_size_log2;
        mtx_lock(&ucontext->mmap_lock);
        uresp.key = ucontext->key;
        ucontext->key += PAGE_SIZE;
        uresp.db_key = ucontext->key;
        ucontext->key += PAGE_SIZE;
        mtx_unlock(&ucontext->mmap_lock);
        if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
            cxfree(mm1);
            cxfree(mm2);
            iwch_destroy_qp(&qhp->ibqp);
            return ERR_PTR(-EFAULT);
        }
        mm1->key = uresp.key;
        mm1->addr = vtophys(qhp->wq.queue);
        mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
        insert_mmap(ucontext, mm1);
        mm2->key = uresp.db_key;
        mm2->addr = qhp->wq.udb & PAGE_MASK;
        mm2->len = PAGE_SIZE;
        insert_mmap(ucontext, mm2);
    }
    qhp->ibqp.qp_num = qhp->wq.qpid;
    callout_init(&(qhp->timer), TRUE);
    CTR6(KTR_IW_CXGB, "sq_num_entries %d, rq_num_entries %d "
        "qpid 0x%0x qhp %p dma_addr 0x%llx size %d",
        qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
        qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
        1 << qhp->wq.size_log2);
    return &qhp->ibqp;
}
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
    int attr_mask, struct ib_udata *udata)
{
    struct iwch_dev *rhp;
    struct iwch_qp *qhp;
    enum iwch_qp_attr_mask mask = 0;
    struct iwch_qp_attributes attrs;

    CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, ibqp);

    /* iwarp does not support the RTR state */
    if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
        attr_mask &= ~IB_QP_STATE;

    /* Make sure we still have something left to do */
    if (!attr_mask)
        return 0;

    memset(&attrs, 0, sizeof attrs);
    qhp = to_iwch_qp(ibqp);
    rhp = qhp->rhp;

    attrs.next_state = iwch_convert_state(attr->qp_state);
    attrs.enable_rdma_read = (attr->qp_access_flags &
        IB_ACCESS_REMOTE_READ) ? 1 : 0;
    attrs.enable_rdma_write = (attr->qp_access_flags &
        IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
    attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

    mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
    mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
        (IWCH_QP_ATTR_ENABLE_RDMA_READ |
         IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
         IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

    return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}
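
/*
 * The iw_cm holds references on QPs via the add_ref/rem_ref callbacks;
 * iwch_destroy_qp() sleeps until the count drains to zero.
 */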
void iwch_qp_add_ref(struct ib_qp *qp)
{
    CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
    mtx_lock(&to_iwch_qp(qp)->lock);
    to_iwch_qp(qp)->refcnt++;
    mtx_unlock(&to_iwch_qp(qp)->lock);
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
    CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
    mtx_lock(&to_iwch_qp(qp)->lock);
    if (--to_iwch_qp(qp)->refcnt == 0)
        wakeup(to_iwch_qp(qp));
    mtx_unlock(&to_iwch_qp(qp)->lock);
}
static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
    CTR3(KTR_IW_CXGB, "%s ib_dev %p qpn 0x%x", __FUNCTION__, dev, qpn);
    return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}
static int iwch_query_pkey(struct ib_device *ibdev,
    u8 port, u16 index, u16 * pkey)
{
    CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
    *pkey = 0;
    return 0;
}
static int iwch_query_gid(struct ib_device *ibdev, u8 port,
    int index, union ib_gid *gid)
{
    struct iwch_dev *dev;
    struct port_info *pi;
    struct adapter *sc;

    CTR5(KTR_IW_CXGB, "%s ibdev %p, port %d, index %d, gid %p",
        __FUNCTION__, ibdev, port, index, gid);
    dev = to_iwch_dev(ibdev);
    sc = dev->rdev.adap;
    PANIC_IF(port == 0 || port > 2);
    pi = &sc->port[port - 1];
    memset(&(gid->raw[0]), 0, sizeof(gid->raw));
    memcpy(&(gid->raw[0]), pi->hw_addr, 6);
    return 0;
}
static int iwch_query_device(struct ib_device *ibdev,
    struct ib_device_attr *props)
{
    struct iwch_dev *dev;
    struct adapter *sc;

    CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);

    dev = to_iwch_dev(ibdev);
    sc = dev->rdev.adap;
    memset(props, 0, sizeof *props);
    memcpy(&props->sys_image_guid, sc->port[0].hw_addr, 6);
    props->device_cap_flags = dev->device_cap_flags;
    props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
    props->vendor_id = pci_get_vendor(sc->dev);
    props->vendor_part_id = pci_get_device(sc->dev);
    props->max_mr_size = dev->attr.max_mr_size;
    props->max_qp = dev->attr.max_qps;
    props->max_qp_wr = dev->attr.max_wrs;
    props->max_sge = dev->attr.max_sge_per_wr;
    props->max_sge_rd = 1;
    props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
    props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
    props->max_cq = dev->attr.max_cqs;
    props->max_cqe = dev->attr.max_cqes_per_cq;
    props->max_mr = dev->attr.max_mem_regs;
    props->max_pd = dev->attr.max_pds;
    props->local_ca_ack_delay = 0;

    return 0;
}
static int iwch_query_port(struct ib_device *ibdev,
    u8 port, struct ib_port_attr *props)
{
    CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
    memset(props, 0, sizeof(struct ib_port_attr));
    props->max_mtu = IB_MTU_4096;
    props->active_mtu = IB_MTU_2048;
    props->state = IB_PORT_ACTIVE;
    props->port_cap_flags =
        IB_PORT_CM_SUP |
        IB_PORT_SNMP_TUNNEL_SUP |
        IB_PORT_REINIT_SUP |
        IB_PORT_DEVICE_MGMT_SUP |
        IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
    props->gid_tbl_len = 1;
    props->pkey_tbl_len = 1;
    props->active_width = 2;
    props->active_speed = 2;
    props->max_msg_sz = -1;

    return 0;
}
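
/*
 * Hook the provider into the IB core: fill in the ib_device method
 * table and the iw_cm_verbs connection-management callbacks, then
 * register the device.
 */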
int iwch_register_device(struct iwch_dev *dev)
{
    int ret;
    struct adapter *sc = dev->rdev.adap;

    CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev);
    strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
    memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
    memcpy(&dev->ibdev.node_guid, sc->port[0].hw_addr, 6);
    dev->device_cap_flags =
        (IB_DEVICE_LOCAL_DMA_LKEY |
         IB_DEVICE_MEM_WINDOW);

    dev->ibdev.uverbs_cmd_mask =
        (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
        (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
        (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
        (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
        (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
        (1ull << IB_USER_VERBS_CMD_REG_MR) |
        (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
        (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
        (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
        (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
        (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
        (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
        (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
        (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
        (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
        (1ull << IB_USER_VERBS_CMD_POST_SEND) |
        (1ull << IB_USER_VERBS_CMD_POST_RECV);
    dev->ibdev.node_type = RDMA_NODE_RNIC;
    memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
    dev->ibdev.phys_port_cnt = sc->params.nports;
    dev->ibdev.num_comp_vectors = 1;
    dev->ibdev.dma_device = dev->rdev.adap->dev;
    dev->ibdev.query_device = iwch_query_device;
    dev->ibdev.query_port = iwch_query_port;
    dev->ibdev.modify_port = iwch_modify_port;
    dev->ibdev.query_pkey = iwch_query_pkey;
    dev->ibdev.query_gid = iwch_query_gid;
    dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
    dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
    dev->ibdev.mmap = iwch_mmap;
    dev->ibdev.alloc_pd = iwch_allocate_pd;
    dev->ibdev.dealloc_pd = iwch_deallocate_pd;
    dev->ibdev.create_ah = iwch_ah_create;
    dev->ibdev.destroy_ah = iwch_ah_destroy;
    dev->ibdev.create_qp = iwch_create_qp;
    dev->ibdev.modify_qp = iwch_ib_modify_qp;
    dev->ibdev.destroy_qp = iwch_destroy_qp;
    dev->ibdev.create_cq = iwch_create_cq;
    dev->ibdev.destroy_cq = iwch_destroy_cq;
    dev->ibdev.resize_cq = iwch_resize_cq;
    dev->ibdev.poll_cq = iwch_poll_cq;
    dev->ibdev.get_dma_mr = iwch_get_dma_mr;
    dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
    dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
    dev->ibdev.reg_user_mr = iwch_reg_user_mr;
    dev->ibdev.dereg_mr = iwch_dereg_mr;
    dev->ibdev.alloc_mw = iwch_alloc_mw;
    dev->ibdev.bind_mw = iwch_bind_mw;
    dev->ibdev.dealloc_mw = iwch_dealloc_mw;

    dev->ibdev.attach_mcast = iwch_multicast_attach;
    dev->ibdev.detach_mcast = iwch_multicast_detach;
    dev->ibdev.process_mad = iwch_process_mad;

    dev->ibdev.req_notify_cq = iwch_arm_cq;
    dev->ibdev.post_send = iwch_post_send;
    dev->ibdev.post_recv = iwch_post_receive;
    dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

    dev->ibdev.iwcm =
        kmalloc(sizeof(struct iw_cm_verbs), M_NOWAIT);
    if (!dev->ibdev.iwcm)
        return (-ENOMEM);

    dev->ibdev.iwcm->connect = iwch_connect;
    dev->ibdev.iwcm->accept = iwch_accept_cr;
    dev->ibdev.iwcm->reject = iwch_reject_cr;
    dev->ibdev.iwcm->create_listen = iwch_create_listen;
    dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
    dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
    dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
    dev->ibdev.iwcm->get_qp = iwch_get_qp;

    ret = ib_register_device(&dev->ibdev, NULL);
    if (ret)
        goto bail1;

    return (0);

bail1:
    cxfree(dev->ibdev.iwcm);
    return (ret);
}
void iwch_unregister_device(struct iwch_dev *dev)
{
    CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev);
    ib_unregister_device(&dev->ibdev);
    cxfree(dev->ibdev.iwcm);
    return;
}