/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/types.h>
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <asm/atomic.h>

#include <common/t4_msg.h>
#include "iw_cxgbe.h"
#define T4_ULPTX_MIN_IO		32
#define C4IW_MAX_INLINE_SIZE	96
#define T4_ULPTX_MAX_DMA	1024
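
/*
 * TPT and PBL entries live in adapter memory and are written with ULP_TX
 * work requests on the control queue: either with the payload carried
 * inline in the work request (_c4iw_write_mem_inline) or, for larger
 * writes, with a DSGL that points at a DMA-mapped buffer
 * (_c4iw_write_mem_dma).  The inline_threshold and use_dsgl knobs select
 * between the two paths.
 */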
static int
mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{

        return (is_t5(dev->rdev.adap) && length >= 8*1024*1024*1024ULL);
}
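
/*
 * Write a 32-byte aligned, 32-byte multiple chunk of adapter memory with a
 * single ULP_TX_MEM_WRITE work request whose payload is described by a
 * one-entry DSGL ("data" is a bus address here).  When "wait" is set the
 * work request asks for a completion and the caller blocks until the
 * firmware replies.
 */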
static int
_c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len,
    void *data, int wait)
{
        struct adapter *sc = rdev->adap;
        struct ulp_mem_io *ulpmc;
        struct ulptx_sgl *sgl;
        u8 wr_len;
        int ret = 0;
        struct c4iw_wr_wait wr_wait;
        struct wrqe *wr;

        addr &= 0x7FFFFFF;

        if (wait)
                c4iw_init_wr_wait(&wr_wait);
        wr_len = roundup(sizeof *ulpmc + sizeof *sgl, 16);

        wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
        if (wr == NULL)
                return -ENOMEM;
        ulpmc = wrtod(wr);

        memset(ulpmc, 0, wr_len);
        INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
        ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
            (wait ? F_FW_WR_COMPL : 0));
        ulpmc->wr.wr_lo = wait ? (u64)(unsigned long)&wr_wait : 0;
        ulpmc->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
        ulpmc->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
            V_T5_ULP_MEMIO_ORDER(1) |
            V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
        ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(len >> 5));
        ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
        ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr));

        sgl = (struct ulptx_sgl *)(ulpmc + 1);
        sgl->cmd_nsge = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
            V_ULPTX_NSGE(1));
        sgl->len0 = cpu_to_be32(len);
        sgl->addr0 = cpu_to_be64((u64)data);

        t4_wrq_tx(sc, wr);

        if (wait)
                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
        return ret;
}
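
/*
 * Write adapter memory with ULP_TX_MEM_WRITE work requests that carry the
 * payload inline (ULP_TX_SC_IMM), at most C4IW_MAX_INLINE_SIZE bytes per
 * work request.  Only the last work request asks for a completion, which
 * the caller waits on.  A NULL "data" pointer zeroes the region instead of
 * copying.
 */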
static int
_c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
        struct adapter *sc = rdev->adap;
        struct ulp_mem_io *ulpmc;
        struct ulptx_idata *ulpsc;
        u8 wr_len, *to_dp, *from_dp;
        int copy_len, num_wqe, i, ret = 0;
        struct c4iw_wr_wait wr_wait;
        struct wrqe *wr;
        u32 cmd;

        cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
        cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);

        addr &= 0x7FFFFFF;
        CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
        num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
        c4iw_init_wr_wait(&wr_wait);
        for (i = 0; i < num_wqe; i++) {

                copy_len = min(len, C4IW_MAX_INLINE_SIZE);
                wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
                    roundup(copy_len, T4_ULPTX_MIN_IO), 16);

                wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
                if (wr == NULL)
                        return -ENOMEM;
                ulpmc = wrtod(wr);

                memset(ulpmc, 0, wr_len);
                INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);

                if (i == (num_wqe-1)) {
                        ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
                            F_FW_WR_COMPL);
                        ulpmc->wr.wr_lo =
                            (__force __be64)(unsigned long) &wr_wait;
                } else
                        ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
                ulpmc->wr.wr_mid = cpu_to_be32(
                    V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

                ulpmc->cmd = cmd;
                ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
                    DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
                ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len -
                    sizeof(ulpmc->wr), 16));
                ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));

                ulpsc = (struct ulptx_idata *)(ulpmc + 1);
                ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
                ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

                to_dp = (u8 *)(ulpsc + 1);
                from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
                if (data)
                        memcpy(to_dp, from_dp, copy_len);
                else
                        memset(to_dp, 0, copy_len);
                if (copy_len % T4_ULPTX_MIN_IO)
                        memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
                            (copy_len % T4_ULPTX_MIN_IO));
                t4_wrq_tx(sc, wr);
                len -= C4IW_MAX_INLINE_SIZE;
        }

        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
        return ret;
}
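
/*
 * DMA-map "data" and push it to adapter memory in T4_ULPTX_MAX_DMA-sized
 * (or smaller, 32-byte aligned) DSGL chunks; whatever remains below the
 * inline threshold is written with the inline path.
 */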
static int
_c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
        struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
        u32 remain = len, dmalen;
        int ret = 0;
        dma_addr_t daddr, save;

        daddr = dma_map_single(rhp->ibdev.dma_device, data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(rhp->ibdev.dma_device, daddr))
                return -1;
        save = daddr;

        while (remain > inline_threshold) {
                if (remain < T4_ULPTX_MAX_DMA) {
                        if (remain & ~T4_ULPTX_MIN_IO)
                                dmalen = remain & ~(T4_ULPTX_MIN_IO - 1);
                        else
                                dmalen = remain;
                } else
                        dmalen = T4_ULPTX_MAX_DMA;
                remain -= dmalen;
                ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen,
                    (void *)daddr, !remain);
                if (ret)
                        goto out;
                addr += dmalen >> 5;
                data = (u8 *)data + dmalen;
                daddr = daddr + dmalen;
        }
        if (remain)
                ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
out:
        dma_unmap_single(rhp->ibdev.dma_device, save, len, DMA_TO_DEVICE);
        return ret;
}
/*
 * Write len bytes of data into addr (32B aligned address).
 * If data is NULL, clear len bytes of memory to zero.
 */
static int
write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
    void *data)
{
        if (rdev->adap->params.ulptx_memwrite_dsgl && use_dsgl) {
                if (len > inline_threshold) {
                        if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
                                log(LOG_ERR, "%s: dma map "
                                    "failure (non fatal)\n", __func__);
                                return _c4iw_write_mem_inline(rdev, addr, len,
                                    data);
                        } else
                                return 0;
                } else
                        return _c4iw_write_mem_inline(rdev, addr, len, data);
        } else
                return _c4iw_write_mem_inline(rdev, addr, len, data);
}
/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
    u32 *stag, u8 stag_state, u32 pdid,
    enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
    int bind_enabled, u32 zbva, u64 to,
    u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
        int err;
        struct fw_ri_tpte tpt;
        u32 stag_idx;
        static atomic_t key;

        if (c4iw_fatal_error(rdev))
                return -EIO;

        stag_state = stag_state > 0;
        stag_idx = (*stag) >> 8;

        if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
                stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
                if (!stag_idx) {
                        mutex_lock(&rdev->stats.lock);
                        rdev->stats.stag.fail++;
                        mutex_unlock(&rdev->stats.lock);
                        return -ENOMEM;
                }
                mutex_lock(&rdev->stats.lock);
                rdev->stats.stag.cur += 32;
                if (rdev->stats.stag.cur > rdev->stats.stag.max)
                        rdev->stats.stag.max = rdev->stats.stag.cur;
                mutex_unlock(&rdev->stats.lock);
                *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
        }
        CTR5(KTR_IW_CXGBE,
            "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x",
            __func__, stag_state, type, pdid, stag_idx);

        /* write TPT entry */
        if (reset_tpt_entry)
                memset(&tpt, 0, sizeof(tpt));
        else {
                tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
                    V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
                    V_FW_RI_TPTE_STAGSTATE(stag_state) |
                    V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
                tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
                    (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
                    V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
                        FW_RI_VA_BASED_TO)) |
                    V_FW_RI_TPTE_PS(page_size));
                tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
                    V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr) >> 3));
                tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
                tpt.va_hi = cpu_to_be32((u32)(to >> 32));
                tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
                tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
                tpt.len_hi = cpu_to_be32((u32)(len >> 32));
        }
        err = write_adapter_mem(rdev, stag_idx +
            (rdev->adap->vres.stag.start >> 5),
            sizeof(tpt), &tpt);

        if (reset_tpt_entry) {
                c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
                mutex_lock(&rdev->stats.lock);
                rdev->stats.stag.cur -= 32;
                mutex_unlock(&rdev->stats.lock);
        }
        return err;
}
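
/*
 * Copy a page buffer list (one __be64 DMA address per page) into its
 * reserved slot in adapter PBL memory.  pbl_addr is a byte address, hence
 * the >> 5 conversion to 32-byte units and << 3 for the size in bytes.
 */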
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
    u32 pbl_addr, u32 pbl_size)
{
        int err;

        CTR4(KTR_IW_CXGBE, "%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d",
            __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);
        err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
        return err;
}
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
    u32 pbl_addr)
{
        return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
            pbl_size, pbl_addr);
}
static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
        *stag = T4_STAG_UNSET;
        return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
            0UL, 0, 0, 0, 0);
}
static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
        return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
            0);
}
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
    u32 pbl_size, u32 pbl_addr)
{
        *stag = T4_STAG_UNSET;
        return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
            0UL, 0, 0, pbl_size, pbl_addr);
}
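
/*
 * Finalize a memory registration: record the stag, mark the MR valid,
 * publish lkey/rkey and insert the MR into the mmid table (stag >> 8) so
 * that later lookups by rkey can find it.
 */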
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
        u32 mmid;

        mhp->attr.state = 1;
        mhp->attr.stag = stag;
        mmid = stag >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
        return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
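
/*
 * Write an NSMR TPT entry for an already-populated MR (attributes and PBL
 * set up by the caller) and complete the registration; on failure the TPT
 * entry is torn down again.
 */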
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
    struct c4iw_mr *mhp, int shift)
{
        u32 stag = T4_STAG_UNSET;
        int ret;

        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
            FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0,
            mhp->attr.mw_bind_enable, mhp->attr.zbva,
            mhp->attr.va_fbo, mhp->attr.len ? mhp->attr.len : -1, shift - 12,
            mhp->attr.pbl_size, mhp->attr.pbl_addr);
        if (ret)
                return ret;

        ret = finish_mem_reg(mhp, stag);
        if (ret)
                dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                    mhp->attr.pbl_addr);
        return ret;
}
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
        mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev, npages << 3);
        if (!mhp->attr.pbl_addr)
                return -ENOMEM;
        mhp->attr.pbl_size = npages;
        return 0;
}
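
/*
 * Allocate a DMA MR that spans all of memory (len = ~0ULL, no PBL): write
 * the TPT entry, then finish the registration.
 */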
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;
        int ret;
        u32 stag = T4_STAG_UNSET;

        CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
        php = to_c4iw_pd(pd);
        rhp = php->rhp;

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
        mhp->attr.zbva = 0;
        mhp->attr.va_fbo = 0;
        mhp->attr.page_size = 0;
        mhp->attr.len = ~0ULL;
        mhp->attr.pbl_size = 0;

        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
            FW_RI_STAG_NSMR, mhp->attr.perms,
            mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
        if (ret)
                goto err1;

        ret = finish_mem_reg(mhp, stag);
        if (ret)
                goto err2;
        return &mhp->ibmr;
err2:
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
            mhp->attr.pbl_addr);
err1:
        kfree(mhp);
        return ERR_PTR(ret);
}
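
/*
 * Register a user memory region: pin the pages with ib_umem_get(), copy
 * their DMA addresses into a freshly allocated PBL (one page worth of PBL
 * entries is written to the adapter at a time), then write the TPT entry
 * via register_mem().
 */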
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 virt, int acc, struct ib_udata *udata)
{
        __be64 *pages;
        int shift, n, len;
        int i, k, entry;
        int err = 0;
        struct scatterlist *sg;
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;

        CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

        if (length == ~0ULL)
                return ERR_PTR(-EINVAL);
        if ((length + start) < start)
                return ERR_PTR(-EINVAL);

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        if (mr_exceeds_hw_limits(rhp, length))
                return ERR_PTR(-EINVAL);

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);
        mhp->rhp = rhp;

        mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
        if (IS_ERR(mhp->umem)) {
                err = PTR_ERR(mhp->umem);
                kfree(mhp);
                return ERR_PTR(err);
        }

        shift = ffs(mhp->umem->page_size) - 1;
        n = mhp->umem->nmap;
        err = alloc_pbl(mhp, n);
        if (err)
                goto err;

        pages = (__be64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_pbl;
        }

        i = n = 0;
        for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
                len = sg_dma_len(sg) >> shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = cpu_to_be64(sg_dma_address(sg) +
                            mhp->umem->page_size * k);
                        if (i == PAGE_SIZE / sizeof *pages) {
                                err = write_pbl(&mhp->rhp->rdev, pages,
                                    mhp->attr.pbl_addr + (n << 3), i);
                                if (err)
                                        goto pbl_done;
                                n += i;
                                i = 0;
                        }
                }
        }

        if (i)
                err = write_pbl(&mhp->rhp->rdev, pages,
                    mhp->attr.pbl_addr + (n << 3), i);
pbl_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_pbl;

        mhp->attr.pdid = php->pdid;
        mhp->attr.zbva = 0;
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = virt;
        mhp->attr.page_size = shift - 12;
        mhp->attr.len = length;

        err = register_mem(rhp, php, mhp, shift);
        if (err)
                goto err_pbl;
        return &mhp->ibmr;

err_pbl:
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
            mhp->attr.pbl_size << 3);
err:
        ib_umem_release(mhp->umem);
        kfree(mhp);
        return ERR_PTR(err);
}
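
/*
 * Allocate a type-1 memory window: reserve a stag with an MW TPT entry and
 * publish it through the mmid table.
 */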
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
    struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mw *mhp;
        u32 mmid;
        u32 stag = 0;
        int ret;

        if (type != IB_MW_TYPE_1)
                return ERR_PTR(-EINVAL);

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);
        ret = allocate_window(&rhp->rdev, &stag, php->pdid);
        if (ret) {
                kfree(mhp);
                return ERR_PTR(ret);
        }
        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = FW_RI_STAG_MW;
        mhp->attr.stag = stag;
        mmid = (stag) >> 8;
        mhp->ibmw.rkey = stag;
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
                deallocate_window(&rhp->rdev, mhp->attr.stag);
                kfree(mhp);
                return ERR_PTR(-ENOMEM);
        }
        CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
            stag);
        return &(mhp->ibmw);
}
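
/*
 * Destroy a memory window: drop it from the mmid table and clear its TPT
 * entry.
 */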
int c4iw_dealloc_mw(struct ib_mw *mw)
{
        struct c4iw_dev *rhp;
        struct c4iw_mw *mhp;
        u32 mmid;

        mhp = to_c4iw_mw(mw);
        rhp = mhp->rhp;
        mmid = (mw->rkey) >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);
        deallocate_window(&rhp->rdev, mhp->attr.stag);
        kfree(mhp);
        CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid,
            mhp);
        return 0;
}
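
/*
 * Allocate a fast-register MR (IB_MR_TYPE_MEM_REG): a coherent buffer for
 * the page list filled in by c4iw_map_mr_sg(), an adapter PBL and an NSMR
 * stag, bounded by the hardware fast-register depth.
 */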
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
    enum ib_mr_type mr_type,
    u32 max_num_sg)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;
        u32 mmid;
        u32 stag = 0;
        int ret = 0;
        int length = roundup(max_num_sg * sizeof(u64), 32);

        php = to_c4iw_pd(pd);
        rhp = php->rhp;

        if (mr_type != IB_MR_TYPE_MEM_REG ||
            max_num_sg > t4_max_fr_depth(
                rhp->rdev.adap->params.ulptx_memwrite_dsgl && use_dsgl))
                return ERR_PTR(-EINVAL);

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp) {
                ret = -ENOMEM;
                goto err;
        }

        mhp->mpl = dma_alloc_coherent(rhp->ibdev.dma_device,
            length, &mhp->mpl_addr, GFP_KERNEL);
        if (!mhp->mpl) {
                ret = -ENOMEM;
                goto err_mpl;
        }
        mhp->max_mpl_len = length;

        mhp->rhp = rhp;
        ret = alloc_pbl(mhp, max_num_sg);
        if (ret)
                goto err1;
        mhp->attr.pbl_size = max_num_sg;
        ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
            mhp->attr.pbl_size, mhp->attr.pbl_addr);
        if (ret)
                goto err2;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = FW_RI_STAG_NSMR;
        mhp->attr.stag = stag;
        mhp->attr.state = 0;
        mmid = (stag) >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
                ret = -ENOMEM;
                goto err3;
        }

        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmr);
err3:
        dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
            mhp->attr.pbl_addr);
err2:
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
            mhp->attr.pbl_size << 3);
err1:
        dma_free_coherent(rhp->ibdev.dma_device,
            mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_mpl:
        kfree(mhp);
err:
        return ERR_PTR(ret);
}
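
/*
 * ib_sg_to_pages() callback: append one page address to the MR's page
 * list, bounded by max_mpl_len.
 */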
static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

        if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
                return -ENOMEM;
        mhp->mpl[mhp->mpl_len++] = addr;
        return 0;
}
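
/*
 * Map a scatterlist onto the fast-register MR by resetting the page list
 * and letting ib_sg_to_pages() fill it through c4iw_set_page().
 */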
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
    int sg_nents, unsigned int *sg_offset)
{
        struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

        mhp->mpl_len = 0;
        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}
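
/*
 * Deregister an MR: remove it from the mmid table, clear its TPT entry and
 * release the PBL, kernel buffer and umem that back it, if any.
 */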
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
        struct c4iw_dev *rhp;
        struct c4iw_mr *mhp;
        u32 mmid;

        CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr);

        mhp = to_c4iw_mr(ib_mr);
        rhp = mhp->rhp;
        mmid = mhp->attr.stag >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
            mhp->attr.pbl_addr);
        if (mhp->attr.pbl_size)
                c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                    mhp->attr.pbl_size << 3);
        if (mhp->kva)
                kfree((void *) (unsigned long) mhp->kva);
        if (mhp->umem)
                ib_umem_release(mhp->umem);
        CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
        kfree(mhp);
        return 0;
}
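
/*
 * Mark the MR that backs the given rkey invalid (attr.state = 0), under
 * the device lock.
 */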
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
        struct c4iw_mr *mhp;
        unsigned long flags;

        spin_lock_irqsave(&rhp->lock, flags);
        mhp = get_mhp(rhp, rkey >> 8);
        if (mhp)
                mhp->attr.state = 0;
        spin_unlock_irqrestore(&rhp->lock, flags);
}