/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <netinet/in.h>


#include <vm/vm.h>
#include <vm/pmap.h>

#include <contrib/rdma/ib_verbs.h>
#include <contrib/rdma/ib_umem.h>
#include <contrib/rdma/ib_user_verbs.h>




#ifdef CONFIG_DEFINED
#include <cxgb_include.h>
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
#include <ulp/iw_cxgb/iw_cxgb.h>
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
#include <ulp/iw_cxgb/iw_cxgb_user.h>
#else
#include <dev/cxgb/cxgb_include.h>
#include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_wr.h>
#include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_hal.h>
#include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.h>
#include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_cm.h>
#include <dev/cxgb/ulp/iw_cxgb/iw_cxgb.h>
#include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_resource.h>
#include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_user.h>
#endif


static int
iwch_modify_port(struct ib_device *ibdev,
                            u8 port, int port_modify_mask,
                            struct ib_port_modify *props)
{
        return (-ENOSYS);
}

static struct ib_ah *
iwch_ah_create(struct ib_pd *pd,
                                    struct ib_ah_attr *ah_attr)
{
        return ERR_PTR(-ENOSYS);
}

static int
iwch_ah_destroy(struct ib_ah *ah)
{
        return (-ENOSYS);
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return (-ENOSYS);
}

static int
iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return (-ENOSYS);
}

static int
iwch_process_mad(struct ib_device *ibdev,
                            int mad_flags,
                            u8 port_num,
                            struct ib_wc *in_wc,
                            struct ib_grh *in_grh,
                            struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        return (-ENOSYS);
}

static int
iwch_dealloc_ucontext(struct ib_ucontext *context)
{
        struct iwch_dev *rhp = to_iwch_dev(context->device);
        struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
        struct iwch_mm_entry *mm, *tmp;

        CTR2(KTR_IW_CXGB, "%s context %p", __FUNCTION__, context);
        TAILQ_FOREACH_SAFE(mm, &ucontext->mmaps, entry, tmp) {
                TAILQ_REMOVE(&ucontext->mmaps, mm, entry);
                cxfree(mm);
        }
        cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
        cxfree(ucontext);
        return 0;
}

static struct ib_ucontext *
iwch_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
        struct iwch_ucontext *context;
        struct iwch_dev *rhp = to_iwch_dev(ibdev);

        CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
        context = malloc(sizeof(*context), M_DEVBUF, M_ZERO|M_NOWAIT);
        if (!context)
                return ERR_PTR(-ENOMEM);
        cxio_init_ucontext(&rhp->rdev, &context->uctx);
        TAILQ_INIT(&context->mmaps);
        mtx_init(&context->mmap_lock, "ucontext mmap", NULL, MTX_DEF);
        return &context->ibucontext;
}

static int
iwch_destroy_cq(struct ib_cq *ib_cq)
{
        struct iwch_cq *chp;

        CTR2(KTR_IW_CXGB, "%s ib_cq %p", __FUNCTION__, ib_cq);
        chp = to_iwch_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        mtx_lock(&chp->lock);
        if (--chp->refcnt)
                msleep(chp, &chp->lock, 0, "iwch_destroy_cq", 0);
        mtx_unlock(&chp->lock);

        cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
        cxfree(chp);
        return 0;
}

static struct ib_cq *
iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        struct iwch_dev *rhp;
        struct iwch_cq *chp;
        struct iwch_create_cq_resp uresp;
        struct iwch_create_cq_req ureq;
        struct iwch_ucontext *ucontext = NULL;

        CTR3(KTR_IW_CXGB, "%s ib_dev %p entries %d", __FUNCTION__, ibdev, entries);
        rhp = to_iwch_dev(ibdev);
        chp = malloc(sizeof(*chp), M_DEVBUF, M_NOWAIT|M_ZERO);
        if (!chp) {
                return ERR_PTR(-ENOMEM);
        }
        if (ib_context) {
                ucontext = to_iwch_ucontext(ib_context);
                if (!t3a_device(rhp)) {
                        if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
                                cxfree(chp);
                                return ERR_PTR(-EFAULT);
                        }
                        chp->user_rptr_addr = (u32 /*__user */*)(unsigned long)ureq.user_rptr_addr;
                }
        }

        if (t3a_device(rhp)) {

                /*
                 * T3A: Add some fluff to handle extra CQEs inserted
                 * for various errors.
                 * Additional CQE possibilities:
                 *      TERMINATE,
                 *      incoming RDMA WRITE Failures
                 *      incoming RDMA READ REQUEST FAILUREs
                 * NOTE: We cannot ensure the CQ won't overflow.
                 */
                entries += 16;
        }
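        /*
         * Round the requested depth up to a power of two so that size_log2
         * below is exact.  Illustrative arithmetic (not from the original
         * comments): a request for 100 CQEs on a T3A becomes 100 + 16 = 116,
         * which rounds up to 128, giving size_log2 = 7.
         */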
        entries = roundup_pow_of_two(entries);
        chp->cq.size_log2 = ilog2(entries);

        if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
                cxfree(chp);
                return ERR_PTR(-ENOMEM);
        }
        chp->rhp = rhp;
        chp->ibcq.cqe = 1 << chp->cq.size_log2;
        mtx_init(&chp->lock, "cxgb cq", NULL, MTX_DEF|MTX_DUPOK);
        chp->refcnt = 1;
        insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

242         if (ucontext) {
243                 struct iwch_mm_entry *mm;
244
245                 mm = kmalloc(sizeof *mm, M_NOWAIT);
246                 if (!mm) {
247                         iwch_destroy_cq(&chp->ibcq);
248                         return ERR_PTR(-ENOMEM);
249                 }
250                 uresp.cqid = chp->cq.cqid;
251                 uresp.size_log2 = chp->cq.size_log2;
252                 mtx_lock(&ucontext->mmap_lock);
253                 uresp.key = ucontext->key;
254                 ucontext->key += PAGE_SIZE;
255                 mtx_unlock(&ucontext->mmap_lock);
256                 if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
257                         cxfree(mm);
258                         iwch_destroy_cq(&chp->ibcq);
259                         return ERR_PTR(-EFAULT);
260                 }
261                 mm->key = uresp.key;
262                 mm->addr = vtophys(chp->cq.queue);
263                 mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
264                                              sizeof (struct t3_cqe));
265                 insert_mmap(ucontext, mm);
266         }
267         CTR4(KTR_IW_CXGB, "created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx",
268              chp->cq.cqid, chp, (1 << chp->cq.size_log2),
269              (unsigned long long) chp->cq.dma_addr);
270         return &chp->ibcq;
271 }
272
273 static int
274 iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
275 {
276 #ifdef notyet
277         struct iwch_cq *chp = to_iwch_cq(cq);
278         struct t3_cq oldcq, newcq;
279         int ret;
280
281         CTR3(KTR_IW_CXGB, "%s ib_cq %p cqe %d", __FUNCTION__, cq, cqe);
282
283         /* We don't downsize... */
284         if (cqe <= cq->cqe)
285                 return 0;
286
287         /* create new t3_cq with new size */
288         cqe = roundup_pow_of_two(cqe+1);
289         newcq.size_log2 = ilog2(cqe);
290
        /* Don't allow resize to less than the current wce count */
        if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
                return (-ENOMEM);
        }

        /* Quiesce all QPs using this CQ */
        ret = iwch_quiesce_qps(chp);
        if (ret) {
                return (ret);
        }

        ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
        if (ret) {
                return (ret);
        }

        /* copy CQEs */
        memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
                                        sizeof(struct t3_cqe));

        /* old iwch_qp gets new t3_cq but keeps old cqid */
        oldcq = chp->cq;
        chp->cq = newcq;
        chp->cq.cqid = oldcq.cqid;

        /* resize new t3_cq to update the HW context */
        ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
        if (ret) {
                chp->cq = oldcq;
                return ret;
        }
        chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

        /* destroy old t3_cq */
        oldcq.cqid = newcq.cqid;
        ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
        if (ret) {
                log(LOG_ERR, "%s - cxio_destroy_cq failed %d\n",
                        __FUNCTION__, ret);
        }

        /* add user hooks here */

        /* resume qps */
        ret = iwch_resume_qps(chp);
        return ret;
#else
        return (-ENOSYS);
#endif
}

static int
iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct iwch_dev *rhp;
        struct iwch_cq *chp;
        enum t3_cq_opcode cq_op;
        int err;
        u32 rptr;

        chp = to_iwch_cq(ibcq);
        rhp = chp->rhp;
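        /*
         * Translate the verbs notify flags into T3 CQ arm opcodes; CQ_ARM_SE
         * appears to request notification only for solicited completions,
         * while CQ_ARM_AN arms the CQ for any new completion.
         */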
        if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
                cq_op = CQ_ARM_SE;
        else
                cq_op = CQ_ARM_AN;
        if (chp->user_rptr_addr) {
                if (copyin(&rptr, chp->user_rptr_addr, 4))
                        return (-EFAULT);
                mtx_lock(&chp->lock);
                chp->cq.rptr = rptr;
        } else
                mtx_lock(&chp->lock);
        CTR2(KTR_IW_CXGB, "%s rptr 0x%x", __FUNCTION__, chp->cq.rptr);
        err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
        mtx_unlock(&chp->lock);
        if (err < 0)
                log(LOG_ERR, "Error %d rearming CQID 0x%x\n", err,
                       chp->cq.cqid);
        if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
                err = 0;
        return err;
}

#ifdef notyet
static int
iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
#ifdef notyet
        int len = vma->vm_end - vma->vm_start;
        u32 key = vma->vm_pgoff << PAGE_SHIFT;
        struct cxio_rdev *rdev_p;
        int ret = 0;
        struct iwch_mm_entry *mm;
        struct iwch_ucontext *ucontext;
        u64 addr;

        CTR4(KTR_IW_CXGB, "%s pgoff 0x%lx key 0x%x len %d", __FUNCTION__, vma->vm_pgoff,
             key, len);

        if (vma->vm_start & (PAGE_SIZE-1)) {
                return (-EINVAL);
        }

        rdev_p = &(to_iwch_dev(context->device)->rdev);
        ucontext = to_iwch_ucontext(context);

        mm = remove_mmap(ucontext, key, len);
        if (!mm)
                return (-EINVAL);
        addr = mm->addr;
        cxfree(mm);

        if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
            (addr < (rdev_p->rnic_info.udbell_physbase +
                       rdev_p->rnic_info.udbell_len))) {

                /*
                 * Map T3 DB register.
                 */
                if (vma->vm_flags & VM_READ) {
                        return (-EPERM);
                }

                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
                vma->vm_flags &= ~VM_MAYREAD;
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else {

                /*
                 * Map WQ or CQ contig dma memory...
                 */
                ret = remap_pfn_range(vma, vma->vm_start,
                                      addr >> PAGE_SHIFT,
                                      len, vma->vm_page_prot);
        }

        return ret;
#endif
        return (0);
}
#endif

static int iwch_deallocate_pd(struct ib_pd *pd)
{
        struct iwch_dev *rhp;
        struct iwch_pd *php;

        php = to_iwch_pd(pd);
        rhp = php->rhp;
        CTR3(KTR_IW_CXGB, "%s ibpd %p pdid 0x%x", __FUNCTION__, pd, php->pdid);
        cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
        cxfree(php);
        return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
                               struct ib_ucontext *context,
                               struct ib_udata *udata)
{
        struct iwch_pd *php;
        u32 pdid;
        struct iwch_dev *rhp;

        CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
        rhp = (struct iwch_dev *) ibdev;
        pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
        if (!pdid)
                return ERR_PTR(-EINVAL);
        php = malloc(sizeof(*php), M_DEVBUF, M_ZERO|M_NOWAIT);
        if (!php) {
                cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
                return ERR_PTR(-ENOMEM);
        }
        php->pdid = pdid;
        php->rhp = rhp;
        if (context) {
                if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
                        iwch_deallocate_pd(&php->ibpd);
                        return ERR_PTR(-EFAULT);
                }
        }
        CTR3(KTR_IW_CXGB, "%s pdid 0x%0x ptr 0x%p", __FUNCTION__, pdid, php);
        return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
        struct iwch_dev *rhp;
        struct iwch_mr *mhp;
        u32 mmid;

        CTR2(KTR_IW_CXGB, "%s ib_mr %p", __FUNCTION__, ib_mr);
        /* There can be no memory windows */
        if (atomic_load_acq_int(&ib_mr->usecnt))
                return (-EINVAL);

        mhp = to_iwch_mr(ib_mr);
        rhp = mhp->rhp;
        mmid = mhp->attr.stag >> 8;
        cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                       mhp->attr.pbl_addr);
        remove_handle(rhp, &rhp->mmidr, mmid);
        if (mhp->kva)
                cxfree((void *) (unsigned long) mhp->kva);
        if (mhp->umem)
                ib_umem_release(mhp->umem);
        CTR3(KTR_IW_CXGB, "%s mmid 0x%x ptr %p", __FUNCTION__, mmid, mhp);
        cxfree(mhp);
        return 0;
}

static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
                                        struct ib_phys_buf *buffer_list,
                                        int num_phys_buf,
                                        int acc,
                                        u64 *iova_start)
{
        __be64 *page_list;
        int shift;
        u64 total_size;
        int npages;
        struct iwch_dev *rhp;
        struct iwch_pd *php;
        struct iwch_mr *mhp;
        int ret;

        CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
        php = to_iwch_pd(pd);
        rhp = php->rhp;

        mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        /* First check that we have enough alignment */
        if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
                ret = -EINVAL;
                goto err;
        }

        if (num_phys_buf > 1 &&
            ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
                ret = -EINVAL;
                goto err;
        }

        ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
                                   &total_size, &npages, &shift, &page_list);
        if (ret)
                goto err;

        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.zbva = 0;

        mhp->attr.perms = iwch_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = *iova_start;
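        /*
         * The TPT page_size field is presumably log2(page size) - 12,
         * so a value of 0 selects 4KB pages.
         */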
        mhp->attr.page_size = shift - 12;

        mhp->attr.len = (u32) total_size;
        mhp->attr.pbl_size = npages;
        ret = iwch_register_mem(rhp, php, mhp, shift, page_list);
        cxfree(page_list);
        if (ret) {
                goto err;
        }
        return &mhp->ibmr;
err:
        cxfree(mhp);
        return ERR_PTR(-ret);

}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
                                     int mr_rereg_mask,
                                     struct ib_pd *pd,
                                     struct ib_phys_buf *buffer_list,
                                     int num_phys_buf,
                                     int acc, u64 * iova_start)
{

        struct iwch_mr mh, *mhp;
        struct iwch_pd *php;
        struct iwch_dev *rhp;
        __be64 *page_list = NULL;
        int shift = 0;
        u64 total_size;
        int npages;
        int ret;

        CTR3(KTR_IW_CXGB, "%s ib_mr %p ib_pd %p", __FUNCTION__, mr, pd);

        /* There can be no memory windows */
        if (atomic_load_acq_int(&mr->usecnt))
                return (-EINVAL);

        mhp = to_iwch_mr(mr);
        rhp = mhp->rhp;
        php = to_iwch_pd(mr->pd);

        /* make sure we are on the same adapter */
        if (rhp != php->rhp)
                return (-EINVAL);

        memcpy(&mh, mhp, sizeof *mhp);

        if (mr_rereg_mask & IB_MR_REREG_PD)
                php = to_iwch_pd(pd);
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                mh.attr.perms = iwch_ib_to_tpt_access(acc);
        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                ret = build_phys_page_list(buffer_list, num_phys_buf,
                                           iova_start,
                                           &total_size, &npages,
                                           &shift, &page_list);
                if (ret)
                        return ret;
        }

        ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages);
        cxfree(page_list);
        if (ret) {
                return ret;
        }
        if (mr_rereg_mask & IB_MR_REREG_PD)
                mhp->attr.pdid = php->pdid;
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                mhp->attr.perms = iwch_ib_to_tpt_access(acc);
        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                mhp->attr.zbva = 0;
                mhp->attr.va_fbo = *iova_start;
                mhp->attr.page_size = shift - 12;
                mhp->attr.len = (u32) total_size;
                mhp->attr.pbl_size = npages;
        }

        return 0;
}


static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                      u64 virt, int acc, struct ib_udata *udata)
{
        __be64 *pages;
        int shift, i, n;
        int err = 0;
        struct ib_umem_chunk *chunk;
        struct iwch_dev *rhp;
        struct iwch_pd *php;
        struct iwch_mr *mhp;
        struct iwch_reg_user_mr_resp uresp;
#ifdef notyet
        int j, k, len;
#endif

        CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

        php = to_iwch_pd(pd);
        rhp = php->rhp;
        mhp = malloc(sizeof(*mhp), M_DEVBUF, M_NOWAIT|M_ZERO);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
        if (IS_ERR(mhp->umem)) {
                err = PTR_ERR(mhp->umem);
                cxfree(mhp);
                return ERR_PTR(-err);
        }

        shift = ffs(mhp->umem->page_size) - 1;

        n = 0;
        TAILQ_FOREACH(chunk, &mhp->umem->chunk_list, entry)
                n += chunk->nents;

        pages = kmalloc(n * sizeof(u64), M_NOWAIT);
        if (!pages) {
                err = -ENOMEM;
                goto err;
        }

        i = n = 0;

#if 0
        TAILQ_FOREACH(chunk, &mhp->umem->chunk_list, entry)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = htobe64(sg_dma_address(
                                        &chunk->page_list[j]) +
                                        mhp->umem->page_size * k);
                        }
                }
#endif
        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.zbva = 0;
        mhp->attr.perms = iwch_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = virt;
        mhp->attr.page_size = shift - 12;
        mhp->attr.len = (u32) length;
        mhp->attr.pbl_size = i;
        err = iwch_register_mem(rhp, php, mhp, shift, pages);
        cxfree(pages);
        if (err)
                goto err;

        if (udata && !t3a_device(rhp)) {
                uresp.pbl_addr = (mhp->attr.pbl_addr -
                                 rhp->rdev.rnic_info.pbl_base) >> 3;
                CTR2(KTR_IW_CXGB, "%s user resp pbl_addr 0x%x", __FUNCTION__,
                     uresp.pbl_addr);

                if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
                        /*
                         * iwch_dereg_mr() already releases the umem and
                         * frees mhp, so return here rather than taking the
                         * err path below, which would free them again.
                         */
                        iwch_dereg_mr(&mhp->ibmr);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &mhp->ibmr;

err:
        ib_umem_release(mhp->umem);
        cxfree(mhp);
        return ERR_PTR(-err);
}

static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct ib_phys_buf bl;
        u64 kva;
        struct ib_mr *ibmr;

        CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);

        /*
         * T3 only supports 32 bits of size.
         */
        bl.size = 0xffffffff;
        bl.addr = 0;
        kva = 0;
        ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
        return ibmr;
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
        struct iwch_dev *rhp;
        struct iwch_pd *php;
        struct iwch_mw *mhp;
        u32 mmid;
        u32 stag = 0;
        int ret;

        php = to_iwch_pd(pd);
        rhp = php->rhp;
        mhp = malloc(sizeof(*mhp), M_DEVBUF, M_ZERO|M_NOWAIT);
        if (!mhp)
                return ERR_PTR(-ENOMEM);
        ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
        if (ret) {
                cxfree(mhp);
                return ERR_PTR(-ret);
        }
        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = TPT_MW;
        mhp->attr.stag = stag;
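        /*
         * The mmid handle is presumably the 24-bit TPT index, i.e. the STag
         * with its low 8 "key" bits stripped off.
         */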
        mmid = (stag) >> 8;
        insert_handle(rhp, &rhp->mmidr, mhp, mmid);
        CTR4(KTR_IW_CXGB, "%s mmid 0x%x mhp %p stag 0x%x", __FUNCTION__, mmid, mhp, stag);
        return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
        struct iwch_dev *rhp;
        struct iwch_mw *mhp;
        u32 mmid;

        mhp = to_iwch_mw(mw);
        rhp = mhp->rhp;
        mmid = (mw->rkey) >> 8;
        cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
        remove_handle(rhp, &rhp->mmidr, mmid);
        cxfree(mhp);
        CTR4(KTR_IW_CXGB, "%s ib_mw %p mmid 0x%x ptr %p", __FUNCTION__, mw, mmid, mhp);
        return 0;
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
        struct iwch_dev *rhp;
        struct iwch_qp *qhp;
        struct iwch_qp_attributes attrs;
        struct iwch_ucontext *ucontext;

        qhp = to_iwch_qp(ib_qp);
        rhp = qhp->rhp;

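        /*
         * Force the QP into ERROR so outstanding work is flushed, wait for
         * any connection endpoint reference and then for the refcount to
         * drain, and only then tear down the hardware queue.
         */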
        attrs.next_state = IWCH_QP_STATE_ERROR;
        iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
        mtx_lock(&qhp->lock);
        if (qhp->ep)
                msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp1", 0);
        mtx_unlock(&qhp->lock);

        remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

        mtx_lock(&qhp->lock);
        if (--qhp->refcnt)
                msleep(qhp, &qhp->lock, 0, "iwch_destroy_qp2", 0);
        mtx_unlock(&qhp->lock);

        ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
                                  : NULL;
        cxio_destroy_qp(&rhp->rdev, &qhp->wq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

        CTR4(KTR_IW_CXGB, "%s ib_qp %p qpid 0x%0x qhp %p", __FUNCTION__,
             ib_qp, qhp->wq.qpid, qhp);
        cxfree(qhp);
        return 0;
}

static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata)
{
        struct iwch_dev *rhp;
        struct iwch_qp *qhp;
        struct iwch_pd *php;
        struct iwch_cq *schp;
        struct iwch_cq *rchp;
        struct iwch_create_qp_resp uresp;
        int wqsize, sqsize, rqsize;
        struct iwch_ucontext *ucontext;

        CTR2(KTR_IW_CXGB, "%s ib_pd %p", __FUNCTION__, pd);
        if (attrs->qp_type != IB_QPT_RC)
                return ERR_PTR(-EINVAL);
        php = to_iwch_pd(pd);
        rhp = php->rhp;
        schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
        rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
        if (!schp || !rchp)
                return ERR_PTR(-EINVAL);

        /* The RQT size must be # of entries + 1 rounded up to a power of two */
        rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
        if (rqsize == attrs->cap.max_recv_wr)
                rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

        /* T3 doesn't support RQT depth < 16 */
        if (rqsize < 16)
                rqsize = 16;

        if (rqsize > T3_MAX_RQ_SIZE)
                return ERR_PTR(-EINVAL);

        if (attrs->cap.max_inline_data > T3_MAX_INLINE)
                return ERR_PTR(-EINVAL);

        /*
         * NOTE: The SQ and total WQ sizes don't need to be
         * a power of two.  However, all the code assumes
         * they are. EG: Q_FREECNT() and friends.
         */
        sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
        wqsize = roundup_pow_of_two(rqsize + sqsize);
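        /*
         * Illustrative sizing example (not from the original comments):
         * max_recv_wr = 64 and max_send_wr = 64 give
         * rqsize = roundup_pow_of_two(65) = 128, sqsize = 64 and
         * wqsize = roundup_pow_of_two(128 + 64) = 256.
         */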
        CTR4(KTR_IW_CXGB, "%s wqsize %d sqsize %d rqsize %d", __FUNCTION__,
             wqsize, sqsize, rqsize);
        qhp = malloc(sizeof(*qhp), M_DEVBUF, M_ZERO|M_NOWAIT);
        if (!qhp)
                return ERR_PTR(-ENOMEM);
        qhp->wq.size_log2 = ilog2(wqsize);
        qhp->wq.rq_size_log2 = ilog2(rqsize);
        qhp->wq.sq_size_log2 = ilog2(sqsize);
        ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
        if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
                           ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
                cxfree(qhp);
                return ERR_PTR(-ENOMEM);
        }

        attrs->cap.max_recv_wr = rqsize - 1;
        attrs->cap.max_send_wr = sqsize;
        attrs->cap.max_inline_data = T3_MAX_INLINE;

        qhp->rhp = rhp;
        qhp->attr.pd = php->pdid;
        qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
        qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
        qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
        qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
        qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
        qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
        qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
        qhp->attr.state = IWCH_QP_STATE_IDLE;
        qhp->attr.next_state = IWCH_QP_STATE_IDLE;

        /*
         * XXX - These don't get passed in from the openib user
         * at create time.  The CM sets them via a QP modify.
         * Need to fix...  I think the CM should
         */
        qhp->attr.enable_rdma_read = 1;
        qhp->attr.enable_rdma_write = 1;
        qhp->attr.enable_bind = 1;
        qhp->attr.max_ord = 1;
        qhp->attr.max_ird = 1;

        mtx_init(&qhp->lock, "cxgb qp", NULL, MTX_DEF|MTX_DUPOK);
        qhp->refcnt = 1;
        insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);

        if (udata) {

                struct iwch_mm_entry *mm1, *mm2;

                mm1 = kmalloc(sizeof *mm1, M_NOWAIT);
                if (!mm1) {
                        iwch_destroy_qp(&qhp->ibqp);
                        return ERR_PTR(-ENOMEM);
                }

                mm2 = kmalloc(sizeof *mm2, M_NOWAIT);
                if (!mm2) {
                        cxfree(mm1);
                        iwch_destroy_qp(&qhp->ibqp);
                        return ERR_PTR(-ENOMEM);
                }

                uresp.qpid = qhp->wq.qpid;
                uresp.size_log2 = qhp->wq.size_log2;
                uresp.sq_size_log2 = qhp->wq.sq_size_log2;
                uresp.rq_size_log2 = qhp->wq.rq_size_log2;
                mtx_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.db_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                mtx_unlock(&ucontext->mmap_lock);
                if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
                        cxfree(mm1);
                        cxfree(mm2);
                        iwch_destroy_qp(&qhp->ibqp);
                        return ERR_PTR(-EFAULT);
                }
                mm1->key = uresp.key;
                mm1->addr = vtophys(qhp->wq.queue);
                mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
                insert_mmap(ucontext, mm1);
                mm2->key = uresp.db_key;
                mm2->addr = qhp->wq.udb & PAGE_MASK;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        qhp->ibqp.qp_num = qhp->wq.qpid;
        callout_init(&(qhp->timer), TRUE);
        CTR6(KTR_IW_CXGB, "sq_num_entries %d, rq_num_entries %d "
             "qpid 0x%0x qhp %p dma_addr 0x%llx size %d",
             qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
             qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
             1 << qhp->wq.size_log2);
        return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata)
{
        struct iwch_dev *rhp;
        struct iwch_qp *qhp;
        enum iwch_qp_attr_mask mask = 0;
        struct iwch_qp_attributes attrs;

        CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, ibqp);

        /* iwarp does not support the RTR state */
        if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
                attr_mask &= ~IB_QP_STATE;

        /* Make sure we still have something left to do */
        if (!attr_mask)
                return 0;

        memset(&attrs, 0, sizeof attrs);
        qhp = to_iwch_qp(ibqp);
        rhp = qhp->rhp;

        attrs.next_state = iwch_convert_state(attr->qp_state);
        attrs.enable_rdma_read = (attr->qp_access_flags &
                               IB_ACCESS_REMOTE_READ) ?  1 : 0;
        attrs.enable_rdma_write = (attr->qp_access_flags &
                                IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;


        mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
        mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
                        (IWCH_QP_ATTR_ENABLE_RDMA_READ |
                         IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
                         IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

        return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
        CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
        mtx_lock(&to_iwch_qp(qp)->lock);
        to_iwch_qp(qp)->refcnt++;
        mtx_unlock(&to_iwch_qp(qp)->lock);
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
        CTR2(KTR_IW_CXGB, "%s ib_qp %p", __FUNCTION__, qp);
        mtx_lock(&to_iwch_qp(qp)->lock);
        if (--to_iwch_qp(qp)->refcnt == 0)
                wakeup(to_iwch_qp(qp));
        mtx_unlock(&to_iwch_qp(qp)->lock);
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
        CTR3(KTR_IW_CXGB, "%s ib_dev %p qpn 0x%x", __FUNCTION__, dev, qpn);
        return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}


static int iwch_query_pkey(struct ib_device *ibdev,
                           u8 port, u16 index, u16 * pkey)
{
        CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
        *pkey = 0;
        return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
                          int index, union ib_gid *gid)
{
        struct iwch_dev *dev;
        struct port_info *pi;

        CTR5(KTR_IW_CXGB, "%s ibdev %p, port %d, index %d, gid %p",
               __FUNCTION__, ibdev, port, index, gid);
        dev = to_iwch_dev(ibdev);
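        /*
         * iWARP has no real GID table; the GID reported here is simply the
         * port's Ethernet MAC address, zero-padded to 16 bytes.
         */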
        PANIC_IF(port == 0 || port > 2);
        pi = ((struct port_info *)dev->rdev.port_info.lldevs[port-1]->if_softc);
        memset(&(gid->raw[0]), 0, sizeof(gid->raw));
        memcpy(&(gid->raw[0]), pi->hw_addr, 6);
        return 0;
}

static int iwch_query_device(struct ib_device *ibdev,
                             struct ib_device_attr *props)
{

        struct iwch_dev *dev;
        CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);

        dev = to_iwch_dev(ibdev);
        memset(props, 0, sizeof *props);
#ifdef notyet
        memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->if_addr.ifa_addr, 6);
#endif
        props->device_cap_flags = dev->device_cap_flags;
#ifdef notyet
        props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
#endif
        props->max_mr_size = ~0ull;
        props->max_qp = dev->attr.max_qps;
        props->max_qp_wr = dev->attr.max_wrs;
        props->max_sge = dev->attr.max_sge_per_wr;
        props->max_sge_rd = 1;
        props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
        props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
        props->max_cq = dev->attr.max_cqs;
        props->max_cqe = dev->attr.max_cqes_per_cq;
        props->max_mr = dev->attr.max_mem_regs;
        props->max_pd = dev->attr.max_pds;
        props->local_ca_ack_delay = 0;

        return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
                           u8 port, struct ib_port_attr *props)
{
        CTR2(KTR_IW_CXGB, "%s ibdev %p", __FUNCTION__, ibdev);
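        /*
         * There is no subnet manager on an iWARP fabric, so most of these
         * port attributes are static placeholder values rather than
         * fabric-managed state.
         */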
        props->max_mtu = IB_MTU_4096;
        props->lid = 0;
        props->lmc = 0;
        props->sm_lid = 0;
        props->sm_sl = 0;
        props->state = IB_PORT_ACTIVE;
        props->phys_state = 0;
        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_SNMP_TUNNEL_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP |
            IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->qkey_viol_cntr = 0;
        props->active_width = 2;
        props->active_speed = 2;
        props->max_msg_sz = -1;

        return 0;
}

#ifdef notyet
static ssize_t show_rev(struct class_device *cdev, char *buf)
{
        struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
                                            ibdev.class_dev);
        CTR2(KTR_IW_CXGB, "%s class dev 0x%p", __FUNCTION__, cdev);
        return sprintf(buf, "%d\n", dev->rdev.t3cdev_p->type);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
        struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
                                            ibdev.class_dev);
        struct ethtool_drvinfo info;
        struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

        CTR2(KTR_IW_CXGB, "%s class dev 0x%p", __FUNCTION__, cdev);
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
        return sprintf(buf, "%s\n", info.fw_version);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
        struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
                                            ibdev.class_dev);
        struct ethtool_drvinfo info;
        struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

        CTR2(KTR_IW_CXGB, "%s class dev 0x%p", __FUNCTION__, cdev);
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
        return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct class_device *cdev, char *buf)
{
        struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
                                            ibdev.class_dev);
        CTR2(KTR_IW_CXGB, "%s class dev 0x%p", __FUNCTION__, dev);
#ifdef notyet
        return sprintf(buf, "%x.%x\n", dev->rdev.rnic_info.pdev->vendor,
                                       dev->rdev.rnic_info.pdev->device);
#else
        return sprintf(buf, "%x.%x\n", 0xdead, 0xbeef);  /* XXX */
#endif
}

static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct class_device_attribute *iwch_class_attributes[] = {
        &class_device_attr_hw_rev,
        &class_device_attr_fw_ver,
        &class_device_attr_hca_type,
        &class_device_attr_board_id
};
#endif

int iwch_register_device(struct iwch_dev *dev)
{
        int ret;
#ifdef notyet
        int i;
#endif
        CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev);
        strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
        memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
#ifdef notyet
        memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
#endif
        dev->device_cap_flags =
            (IB_DEVICE_ZERO_STAG |
             IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);

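        /*
         * Advertise only the user verbs commands this provider implements;
         * commands not set in this mask are rejected by the uverbs layer
         * before they reach the driver.
         */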
        dev->ibdev.uverbs_cmd_mask =
            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
            (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
            (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
            (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_REG_MR) |
            (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
            (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
            (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
            (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
            (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
            (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
            (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
            (1ull << IB_USER_VERBS_CMD_POST_SEND) |
            (1ull << IB_USER_VERBS_CMD_POST_RECV);
        dev->ibdev.node_type = RDMA_NODE_RNIC;
        memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
        dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
        dev->ibdev.num_comp_vectors = 1;
        dev->ibdev.dma_device = dev->rdev.rnic_info.pdev;
        dev->ibdev.query_device = iwch_query_device;
        dev->ibdev.query_port = iwch_query_port;
        dev->ibdev.modify_port = iwch_modify_port;
        dev->ibdev.query_pkey = iwch_query_pkey;
        dev->ibdev.query_gid = iwch_query_gid;
        dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
        dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
#ifdef notyet
        dev->ibdev.mmap = iwch_mmap;
#endif
        dev->ibdev.alloc_pd = iwch_allocate_pd;
        dev->ibdev.dealloc_pd = iwch_deallocate_pd;
        dev->ibdev.create_ah = iwch_ah_create;
        dev->ibdev.destroy_ah = iwch_ah_destroy;
        dev->ibdev.create_qp = iwch_create_qp;
        dev->ibdev.modify_qp = iwch_ib_modify_qp;
        dev->ibdev.destroy_qp = iwch_destroy_qp;
        dev->ibdev.create_cq = iwch_create_cq;
        dev->ibdev.destroy_cq = iwch_destroy_cq;
        dev->ibdev.resize_cq = iwch_resize_cq;
        dev->ibdev.poll_cq = iwch_poll_cq;
        dev->ibdev.get_dma_mr = iwch_get_dma_mr;
        dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
        dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
        dev->ibdev.reg_user_mr = iwch_reg_user_mr;
        dev->ibdev.dereg_mr = iwch_dereg_mr;
        dev->ibdev.alloc_mw = iwch_alloc_mw;
        dev->ibdev.bind_mw = iwch_bind_mw;
        dev->ibdev.dealloc_mw = iwch_dealloc_mw;

        dev->ibdev.attach_mcast = iwch_multicast_attach;
        dev->ibdev.detach_mcast = iwch_multicast_detach;
        dev->ibdev.process_mad = iwch_process_mad;

        dev->ibdev.req_notify_cq = iwch_arm_cq;
        dev->ibdev.post_send = iwch_post_send;
        dev->ibdev.post_recv = iwch_post_receive;

        dev->ibdev.iwcm =
            (struct iw_cm_verbs *) kmalloc(sizeof(struct iw_cm_verbs),
                                           M_NOWAIT);
        if (!dev->ibdev.iwcm) {
                /* Don't dereference a failed M_NOWAIT allocation below. */
                ret = -ENOMEM;
                goto bail1;
        }
        dev->ibdev.iwcm->connect = iwch_connect;
        dev->ibdev.iwcm->accept = iwch_accept_cr;
        dev->ibdev.iwcm->reject = iwch_reject_cr;
        dev->ibdev.iwcm->create_listen = iwch_create_listen;
        dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
        dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
        dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
        dev->ibdev.iwcm->get_qp = iwch_get_qp;

        ret = ib_register_device(&dev->ibdev);
        if (ret)
                goto bail1;
#ifdef notyet
        for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
                ret = class_device_create_file(&dev->ibdev.class_dev,
                                               iwch_class_attributes[i]);
                if (ret) {
                        goto bail2;
                }
        }
#endif
        return 0;
#ifdef notyet
bail2:
#endif
        ib_unregister_device(&dev->ibdev);
bail1:
        return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
#ifdef notyet
        int i;

        CTR2(KTR_IW_CXGB, "%s iwch_dev %p", __FUNCTION__, dev);

        for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
                class_device_remove_file(&dev->ibdev.class_dev,
                                         iwch_class_attributes[i]);
#endif
        ib_unregister_device(&dev->ibdev);
        return;
}