/*-
 * Copyright (c) 2015, Mellanox Technologies, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "icl_iser.h"
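/*
 * Fast-registration descriptors are kept on a per-connection free list
 * (ib_conn->fastreg.pool); the get/put helpers below simply pop and push
 * entries under the connection mutex.
 */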
static struct fast_reg_descriptor *
iser_reg_desc_get(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc;

	mtx_lock(&ib_conn->lock);
	desc = list_first_entry(&ib_conn->fastreg.pool,
				struct fast_reg_descriptor, list);
	list_del(&desc->list);
	mtx_unlock(&ib_conn->lock);

	return desc;
}
static void
iser_reg_desc_put(struct ib_conn *ib_conn,
		  struct fast_reg_descriptor *desc)
{
	mtx_lock(&ib_conn->lock);
	list_add(&desc->list, &ib_conn->fastreg.pool);
	mtx_unlock(&ib_conn->lock);
}
#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
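/*
 * Illustration (not from the original source; assumes MASK_4K is defined as
 * ~(SIZE_4K - 1), so ~MASK_4K == 0xfff): IS_4K_ALIGNED(0x10000) holds, while
 * IS_4K_ALIGNED(0x10200) does not, since 0x10200 & 0xfff == 0x200 != 0.
 */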
/*
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers which is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
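/*
 * Worked example (hypothetical addresses, not from the original source):
 * for two DMA entries [0x10000, len 0x1000] and [0x20000, len 0x1000],
 * every boundary is 4K aligned, so the function returns 2 (== dma_nents)
 * and fast registration can proceed. If the first entry were only 0x800
 * bytes long, its end address 0x10800 would fail IS_4K_ALIGNED and the
 * loop would stop at the first element.
 */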
static int
iser_data_buf_aligned_len(struct iser_data_buf *data, struct ib_device *ibdev)
{
	struct scatterlist *sg, *sgl, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = data->sgl;
	start_addr = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);

		/* consecutive fragments of the same page stay merged */
		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i+1;

	return ret_len;
}
void
iser_dma_unmap_task_data(struct icl_iser_pdu *iser_pdu,
			 struct iser_data_buf *data,
			 enum dma_data_direction dir)
{
	struct ib_device *dev;

	dev = iser_pdu->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sgl, data->size, dir);
}
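/*
 * Single DMA entry: the device's global DMA MR already covers the buffer,
 * so registration reduces to filling in the sge from the first (only)
 * scatterlist element; no work request needs to be posted.
 */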
static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
	     struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sgl;

	reg->sge.lkey = device->mr->lkey;
	reg->rkey = device->mr->rkey;
	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);

	return 0;
}
/**
 * TODO: This should be a verb
 * iser_ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey - the rkey to increment.
 */
static inline u32
iser_ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return (((rkey + 1) & mask) | (rkey & ~mask));
}
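/*
 * Example (values not from the original source): only the low "key" byte
 * changes while the upper 24 bits are preserved: 0x12345601 becomes
 * 0x12345602, and 0x123456ff wraps to 0x12345600 without carrying into
 * the upper bits.
 */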
static void
iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	rkey = iser_ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}
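/*
 * Note (an interpretation of the code above, not an authoritative claim):
 * iser_inv_rkey() builds a LOCAL_INV work request against the MR's current
 * rkey and then arms the MR with the incremented key, so the invalidate and
 * the following REG_MR can be chained on a single post and a stale rkey can
 * never be confused with the new registration.
 */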
static int
iser_fast_reg_mr(struct icl_iser_pdu *iser_pdu,
		 struct iser_data_buf *mem,
		 struct iser_reg_resources *rsc,
		 struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_pdu->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_mr *mr;
	struct ib_reg_wr fastreg_wr;
	struct ib_send_wr inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, n;

	/* if there is a single dma entry, the dma mr suffices */
	if (mem->dma_nents == 1)
		return iser_reg_dma(device, mem, reg);

	/* only dereference rsc past this point; the caller passes NULL
	 * for the single dma entry case handled above */
	mr = rsc->mr;

	if (!rsc->mr_valid) {
		iser_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	n = ib_map_mr_sg(mr, mem->sgl, mem->size, NULL, SIZE_4K);
	if (unlikely(n != mem->size)) {
		ISER_ERR("failed to map sg (%d/%d)\n", n, mem->size);
		return n < 0 ? n : -EINVAL;
	}

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr.opcode = IB_WR_REG_MR;
	fastreg_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
	fastreg_wr.wr.num_sge = 0;
	fastreg_wr.mr = mr;
	fastreg_wr.key = mr->rkey;
	fastreg_wr.access = IB_ACCESS_LOCAL_WRITE |
			    IB_ACCESS_REMOTE_WRITE |
			    IB_ACCESS_REMOTE_READ;

	if (!wr)
		wr = &fastreg_wr.wr;
	else
		wr->next = &fastreg_wr.wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		ISER_ERR("fast registration failed, ret:%d", ret);
		return ret;
	}
	rsc->mr_valid = 0;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = mr->iova;
	reg->sge.length = mr->length;

	return ret;
}
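/*
 * Note on rsc->mr_valid (as read from the code above): it is cleared after
 * every successful REG_MR post and checked on entry, so each re-use of the
 * MR first chains a LOCAL_INV work request that retires the previous
 * registration before the new one takes effect.
 */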
/**
 * iser_reg_rdma_mem - Registers memory intended for RDMA,
 * using Fast Registration WR (if possible) obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int
iser_reg_rdma_mem(struct icl_iser_pdu *iser_pdu,
		  enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_pdu->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_pdu->data[cmd_dir];
	struct iser_mem_reg *mem_reg = &iser_pdu->rdma_reg[cmd_dir];
	struct fast_reg_descriptor *desc = NULL;
	int err, aligned_len;

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		ISER_ERR("bounce buffer is not supported");
		return -EINVAL;
	}

	if (mem->dma_nents != 1) {
		desc = iser_reg_desc_get(ib_conn);
		mem_reg->mem_h = desc;
	}

	err = iser_fast_reg_mr(iser_pdu, mem, desc ? &desc->rsc : NULL,
			       mem_reg);
	if (err)
		goto err_reg;

	return 0;

err_reg:
	if (desc)
		iser_reg_desc_put(ib_conn, desc);

	return err;
}
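/*
 * On success the descriptor remains referenced through mem_reg->mem_h and
 * is recycled by iser_unreg_rdma_mem() below once the command completes.
 */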
void
iser_unreg_rdma_mem(struct icl_iser_pdu *iser_pdu,
		    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_pdu->rdma_reg[cmd_dir];

	if (!reg->mem_h)
		return;

	iser_reg_desc_put(&iser_pdu->iser_conn->ib_conn,
			  reg->mem_h);
	reg->mem_h = NULL;
}
int
iser_dma_map_task_data(struct icl_iser_pdu *iser_pdu,
		       struct iser_data_buf *data,
		       enum iser_data_dir iser_dir,
		       enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_pdu->dir[iser_dir] = 1;
	dev = iser_pdu->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sgl, data->size, dma_dir);
	if (data->dma_nents == 0) {
		ISER_ERR("dma_map_sg failed");
		return -EINVAL;
	}

	return 0;
}