/*-
 * Copyright (c) 2015, Mellanox Technologies, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "icl_iser.h"

static struct fast_reg_descriptor *
iser_reg_desc_get(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc;

	mtx_lock(&ib_conn->lock);
	desc = list_first_entry(&ib_conn->fastreg.pool,
				struct fast_reg_descriptor, list);
	list_del(&desc->list);
	mtx_unlock(&ib_conn->lock);

	return (desc);
}

static void
iser_reg_desc_put(struct ib_conn *ib_conn,
		  struct fast_reg_descriptor *desc)
{
	mtx_lock(&ib_conn->lock);
	list_add(&desc->list, &ib_conn->fastreg.pool);
	mtx_unlock(&ib_conn->lock);
}
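
/*
 * The get/put pair above manages a pre-allocated pool of fast registration
 * descriptors under ib_conn->lock.  iser_reg_desc_get() takes the first
 * entry without checking for an empty list, so the pool is assumed to be
 * pre-sized for the maximum number of in-flight commands when the
 * connection resources are created.
 */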

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
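
/*
 * MASK_4K, SIZE_4K and SHIFT_4K are expected to come from the driver header
 * (4KB registration page size, MASK_4K = ~(SIZE_4K - 1)).  As an example,
 * 0x10002000 is 4K aligned (its low 12 bits are clear) while 0x10002040 is
 * not, so the latter may only appear at the head or tail of an SG list that
 * is to be registered without bounce buffers.
 */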

/*
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be less
 * than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements.  Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages.  The code also supports the weird case
 * where fragments of the same page are present in the SG as consecutive
 * elements.  A single-entry SG is handled as well.
 */
static int
iser_sg_to_page_vec(struct iser_data_buf *data,
		    struct ib_device *ibdev, u64 *pages,
		    int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = data->sgl;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;

	return (cur_page);
}
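
/*
 * Worked example (illustrative, assuming the first buffer starts 0x800 into
 * its page): DMA segments {0x10000800, len 0x800}, {0x10001000, len 0x2000},
 * {0x10003000, len 0x1000} yield *offset = 0x800, *data_size = 0x3800 and
 * the page vector {0x10000000, 0x10001000, 0x10002000, 0x10003000}, i.e. a
 * return value of 4.
 */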

/*
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers that is correctly aligned for RDMA,
 * and returns the number of correctly aligned entries.  Supports the case
 * where consecutive SG elements are actually fragments of the same physical
 * page.
 */
static int
iser_data_buf_aligned_len(struct iser_data_buf *data, struct ib_device *ibdev)
{
	struct scatterlist *sg, *sgl, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return (1);

	sgl = data->sgl;
	start_addr = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;
		next_sg = sg_next(sg);
		if (!next_sg)
			break;
		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);
		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;
		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i + 1;

	return (ret_len);
}
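
/*
 * In practice this means that only the first element may start unaligned and
 * only the last may end unaligned; if an interior boundary is not on a 4K
 * boundary (and is not a same-page continuation), the returned length is
 * shorter than dma_nents and iser_reg_rdma_mem() below rejects the request,
 * since this driver does not implement bounce buffers.
 */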

void
iser_dma_unmap_task_data(struct icl_iser_pdu *iser_pdu,
			 struct iser_data_buf *data,
			 enum dma_data_direction dir)
{
	struct ib_device *dev;

	dev = iser_pdu->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sgl, data->size, dir);
}

static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
	     struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sgl;

	reg->sge.lkey = device->mr->lkey;
	reg->rkey = device->mr->rkey;
	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);

	return (0);
}
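
/*
 * device->mr is assumed to be the global DMA memory region registered when
 * the device resources were created, so a single contiguous DMA segment can
 * be described with one SGE and no fast registration work request needs to
 * be posted.
 */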

/*
 * TODO: This should be a verb
 * iser_ib_inc_rkey - increments the key portion of the given rkey. Can be
 * used for calculating a new rkey for type 2 memory windows.
 * @rkey - the rkey to increment.
 */
static inline u32
iser_ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return (((rkey + 1) & mask) | (rkey & ~mask));
}
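
/*
 * Example: only the low 8 bits (the "key" portion) are bumped while the
 * upper 24-bit index is preserved: 0x12345678 becomes 0x12345679, and
 * 0x123456ff wraps to 0x12345600.
 */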

static void
iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	rkey = iser_ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}
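
/*
 * The local-invalidate work request built above revokes the MR's current
 * rkey before the region is reused, and the key portion is then advanced so
 * that the next registration hands out a fresh rkey rather than one a remote
 * peer may still hold.
 */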

static int
iser_fast_reg_mr(struct icl_iser_pdu *iser_pdu,
		 struct iser_data_buf *mem,
		 struct iser_reg_resources *rsc,
		 struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_pdu->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_send_wr fastreg_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, offset, size, plen;

	/* if there is a single dma entry, the dma mr suffices */
	if (mem->dma_nents == 1)
		return iser_reg_dma(device, mem, reg);

	/* rsc is not null */
	plen = iser_sg_to_page_vec(mem, device->ib_device,
				   rsc->frpl->page_list,
				   &offset, &size);
	if (plen * SIZE_4K < size) {
		ISER_ERR("fast reg page_list too short to hold this SG");
		return (EINVAL);
	}

	if (!rsc->mr_valid) {
		iser_inv_rkey(&inv_wr, rsc->mr);
		wr = &inv_wr;
	}

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = rsc->frpl->page_list[0] + offset;
	fastreg_wr.wr.fast_reg.page_list = rsc->frpl;
	fastreg_wr.wr.fast_reg.page_list_len = plen;
	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
	fastreg_wr.wr.fast_reg.length = size;
	fastreg_wr.wr.fast_reg.rkey = rsc->mr->rkey;
	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

	if (!wr)
		wr = &fastreg_wr;
	else
		wr->next = &fastreg_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		ISER_ERR("fast registration failed, ret:%d", ret);
		return (ret);
	}
	rsc->mr_valid = 0;

	reg->sge.lkey = rsc->mr->lkey;
	reg->rkey = rsc->mr->rkey;
	reg->sge.addr = rsc->frpl->page_list[0] + offset;
	reg->sge.length = size;

	return (ret);
}
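
/*
 * When the MR still carries a stale key, the LOCAL_INV WR is chained in
 * front of the FAST_REG_MR WR and both are posted with a single
 * ib_post_send() call; both carry ISER_FASTREG_LI_WRID so the completion
 * path can distinguish them from regular I/O work requests.
 */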

/*
 * iser_reg_rdma_mem - Registers memory intended for RDMA,
 * using Fast Registration WR (if possible) obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int
iser_reg_rdma_mem(struct icl_iser_pdu *iser_pdu,
		  enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_pdu->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_pdu->data[cmd_dir];
	struct iser_mem_reg *mem_reg = &iser_pdu->rdma_reg[cmd_dir];
	struct fast_reg_descriptor *desc = NULL;
	int err, aligned_len;

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		ISER_ERR("bounce buffer is not supported");
		return (EINVAL);
	}

	if (mem->dma_nents != 1) {
		desc = iser_reg_desc_get(ib_conn);
		mem_reg->mem_h = desc;
	}

	err = iser_fast_reg_mr(iser_pdu, mem, desc ? &desc->rsc : NULL,
			       mem_reg);
	if (err)
		goto err_reg;

	return (0);

err_reg:
	if (desc)
		iser_reg_desc_put(ib_conn, desc);

	return (err);
}
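
/*
 * The resulting mem_reg (rkey, address and length of the registered region)
 * is what the initiator advertises to the target in the iSER header, so the
 * target can perform RDMA reads/writes for this command.
 */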

void
iser_unreg_rdma_mem(struct icl_iser_pdu *iser_pdu,
		    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_pdu->rdma_reg[cmd_dir];

	if (!reg->mem_h)
		return;

	iser_reg_desc_put(&iser_pdu->iser_conn->ib_conn,
			  reg->mem_h);
	reg->mem_h = NULL;
}

int
iser_dma_map_task_data(struct icl_iser_pdu *iser_pdu,
		       struct iser_data_buf *data,
		       enum iser_data_dir iser_dir,
		       enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_pdu->dir[iser_dir] = 1;
	dev = iser_pdu->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sgl, data->size, dma_dir);
	if (data->dma_nents == 0) {
		ISER_ERR("dma_map_sg failed");
		return (EINVAL);
	}

	return (0);
}