/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/types.h>
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <asm/atomic.h>

#include <common/t4_msg.h>
#include "iw_cxgbe.h"

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
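
/*
 * T4 and T5 devices cannot express memory regions of 8GB or more in a
 * single TPT entry, so such registrations are rejected up front.
 */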
static int
mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{

	return ((is_t4(dev->rdev.adap) ||
	    is_t5(dev->rdev.adap)) &&
	    length >= 8*1024*1024*1024ULL);
}
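
/*
 * Copy 'len' bytes from 'data' (or zeros when 'data' is NULL) into adapter
 * memory at 'addr', using ULP_TX_MEM_WRITE work requests that carry at most
 * C4IW_MAX_INLINE_SIZE bytes of inline payload each.  Only the last work
 * request asks for a completion; the caller sleeps on 'wr_wait' until the
 * firmware replies.
 */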
static int
write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	struct adapter *sc = rdev->adap;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;
	u32 cmd;

	cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= cpu_to_be32(F_ULP_MEMIO_ORDER);
	else
		cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);

	CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = min(len, C4IW_MAX_INLINE_SIZE);
		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
		    roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
		if (wr == NULL)
			return (-ENOMEM);
		ulpmc = wrtod(wr);

		memset(ulpmc, 0, wr_len);
		INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
			    F_FW_WR_COMPL);
			ulpmc->wr.wr_lo =
			    (__force __be64)(unsigned long)&wr_wait;
		} else
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
		ulpmc->wr.wr_mid = cpu_to_be32(
		    V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		ulpmc->cmd = cmd;
		ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
		    DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len -
		    sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(ulpsc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			    (copy_len % T4_ULPTX_MIN_IO));

		t4_wrq_tx(sc, wr);
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return (ret);
}
/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	CTR5(KTR_IW_CXGBE,
	    "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x",
	    __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
		    V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
		    V_FW_RI_TPTE_STAGSTATE(stag_state) |
		    V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
		    (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
		    V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
		    FW_RI_VA_BASED_TO)) |
		    V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
		    V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
	    (rdev->adap->vres.stag.start >> 5),
	    sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}
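
/*
 * Copy a page list into the adapter's PBL pool at 'pbl_addr'.  Each entry
 * is an 8-byte DMA address, hence the 'pbl_size << 3' byte count.
 */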
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	CTR4(KTR_IW_CXGBE, "%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d",
	    __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
	    pbl_size, pbl_addr);
}
static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
	    0UL, 0, 0, 0, 0);
}
static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
	    0);
}
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
	    0UL, 0, 0, pbl_size, pbl_addr);
}
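
/*
 * Finish a memory registration: record the stag, derive the lkey/rkey from
 * it, and insert the MR into the device's mmid table keyed by stag >> 8.
 */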
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
	    FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0,
	    mhp->attr.mw_bind_enable, mhp->attr.zbva,
	    mhp->attr.va_fbo, mhp->attr.len ? mhp->attr.len : -1, shift - 12,
	    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		    mhp->attr.pbl_addr);
	return ret;
}
static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			  struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
	    FW_RI_STAG_NSMR, mhp->attr.perms,
	    mhp->attr.mw_bind_enable, mhp->attr.zbva,
	    mhp->attr.va_fbo, mhp->attr.len, shift - 12,
	    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		    mhp->attr.pbl_addr);

	return ret;
}
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
	    npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}
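
/*
 * Build a big-endian page list covering an array of physical buffers.
 * Validates buffer alignment, picks the largest page shift (up to 2^27)
 * that still covers every buffer, and returns the kmalloc'd list together
 * with the total size, page count, and shift.
 */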
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
				int num_phys_buf, u64 *iova_start,
				u64 *total_size, int *npages,
				int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
			    PAGE_SIZE - 1) & PAGE_MASK;
	}

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
		    (1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
			    ((u64) j << *shift));

	CTR6(KTR_IW_CXGBE,
	    "%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d", __func__,
	    (unsigned long long)*iova_start, (unsigned long long)mask, *shift,
	    (unsigned long long)*total_size, *npages);

	return 0;
}
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
			     int num_phys_buf, int acc, u64 *iova_start)
{
	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size = 0;
	int npages = 0;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s ib_mr %p ib_pd %p", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
		    IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
		    iova_start, &total_size, &npages, &shift, &page_list);
		if (ret)
			return ret;
	}
	if (mr_exceeds_hw_limits(rhp, total_size)) {
		kfree(page_list);
		return -EINVAL;
	}
	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
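
/*
 * Register a set of physical buffers as a memory region: build the page
 * list, write it to the adapter's PBL pool, then write the TPT entry.
 */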
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
	    &total_size, &npages, &shift,
	    &page_list);
	if (ret)
		goto err;

	if (mr_exceeds_hw_limits(rhp, total_size)) {
		kfree(page_list);
		ret = -EINVAL;
		goto err;
	}

	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
	    npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
	    mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}
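
/*
 * Allocate a DMA MR that covers all of memory (va_fbo 0, length ~0UL); no
 * PBL is needed, only a single TPT entry.
 */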
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0UL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
	    FW_RI_STAG_NSMR, mhp->attr.perms,
	    mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}
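
/*
 * Register a user memory region: pin the user pages with ib_umem_get(),
 * write their DMA addresses into the PBL pool one page-sized batch at a
 * time, then write the TPT entry.
 */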
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 virt, int acc, struct ib_udata *udata, int mr_id)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct scatterlist *sg;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;
	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;
	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
			    mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = write_pbl(&mhp->rhp->rdev, pages,
				    mhp->attr.pbl_addr + (n << 3), i);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
		    mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
	    mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
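
/*
 * Allocate a memory window: reserve an MW stag with allocate_window() and
 * publish the window in the mmid table under stag >> 8.
 */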
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmw);
}
int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid,
	    mhp);
	return 0;
}
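
/*
 * Allocate a fast-register MR: reserve a PBL of 'pbl_depth' entries and a
 * non-shared MR stag for it; the mapping itself is filled in later by
 * fast-register work requests.
 */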
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
	    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
	    mhp->attr.pbl_size << 3);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}
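
/*
 * Allocate a physically contiguous, 4KB-aligned page list for fast
 * registration; contigmalloc() keeps it in a single DMA-able chunk.
 */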
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
						     int page_list_len)
{
	struct c4iw_fr_page_list *c4pl;
	struct c4iw_dev *dev = to_c4iw_dev(device);
	dma_addr_t dma_addr;
	int size = sizeof *c4pl + page_list_len * sizeof(u64);

	c4pl = contigmalloc(size,
	    M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0);
	if (c4pl)
		dma_addr = vtophys(c4pl);
	else
		return ERR_PTR(-ENOMEM);

	pci_unmap_addr_set(c4pl, mapping, dma_addr);
	c4pl->dma_addr = dma_addr;
	c4pl->dev = dev;
	c4pl->size = size;
	c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
	c4pl->ibpl.max_page_list_len = page_list_len;

	return &c4pl->ibpl;
}
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);

	contigfree(c4pl, c4pl->size, M_DEVBUF);
}
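
/*
 * Deregister an MR: remove it from the mmid table, clear its TPT entry,
 * and release its PBL, kernel buffer, and pinned user memory as applicable.
 */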
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
		    mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}