/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mlx4_ib.h"
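/*
 * Convert IB verbs access flags to the MLX4_PERM_* bits programmed
 * into the HCA's memory protection table; local read permission is
 * always included.
 */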
static u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
	       MLX4_PERM_LOCAL_READ;
}
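/*
 * Create a DMA MR spanning the entire address space (start 0,
 * length ~0ull), so the returned lkey/rkey cover all of memory.
 */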
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
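/*
 * Write the DMA addresses of a pinned userspace region into an MTT
 * (memory translation table).  Addresses are staged in a scratch page
 * and flushed to the device one page-sized batch at a time.
 */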
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	struct ib_umem_chunk *chunk;
	int i, j, k;
	int n;
	int len;
	int err = 0;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
					umem->page_size * k;
				/*
				 * Be friendly to mlx4_write_mtt() and
				 * pass it chunks of appropriate size.
				 */
				if (i == PAGE_SIZE / sizeof (u64)) {
					err = mlx4_write_mtt(dev->dev, mtt, n,
							     i, pages);
					if (err)
						goto out;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return err;
}
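/*
 * Try to register a region backed by huge pages using HPAGE_SIZE MTT
 * entries instead of one entry per small page.  Each huge page must be
 * DMA-contiguous; a nonzero return tells the caller to fall back to
 * the regular per-page path.
 */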
static int handle_hugetlb_user_mr(struct ib_pd *pd, struct mlx4_ib_mr *mr,
				  u64 start, u64 virt_addr, int access_flags)
{
#if defined(CONFIG_HUGETLB_PAGE) && !defined(__powerpc__) && !defined(__ia64__)
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem_chunk *chunk;
	unsigned dsize;
	dma_addr_t daddr;
	unsigned cur_size = 0;
	dma_addr_t uninitialized_var(cur_addr);
	int n;
	struct ib_umem *umem = mr->umem;
	u64 *arr;
	int err = 0;
	int i;
	int j = 0;
	int off = start & (HPAGE_SIZE - 1);

	n = DIV_ROUND_UP(off + umem->length, HPAGE_SIZE);
	arr = kmalloc(n * sizeof *arr, GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (i = 0; i < chunk->nmap; ++i) {
			daddr = sg_dma_address(&chunk->page_list[i]);
			dsize = sg_dma_len(&chunk->page_list[i]);
			if (!cur_size) {
				cur_addr = daddr;
				cur_size = dsize;
			} else if (cur_addr + cur_size != daddr) {
				err = -EINVAL;
				goto out;
			} else
				cur_size += dsize;

			if (cur_size > HPAGE_SIZE) {
				err = -EINVAL;
				goto out;
			} else if (cur_size == HPAGE_SIZE) {
				cur_size = 0;
				arr[j++] = cur_addr;
			}
		}

	if (cur_size)
		arr[j++] = cur_addr;

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, umem->length,
			    convert_access(access_flags), n, HPAGE_SHIFT, &mr->mmr);
	if (err)
		goto out;

	err = mlx4_write_mtt(dev->dev, &mr->mmr.mtt, 0, n, arr);

out:
	kfree(arr);
	return err;
#else
	return -ENOSYS;
#endif
}
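/*
 * Register a userspace memory region: pin the pages with
 * ib_umem_get(), then take the huge-page fast path if possible and
 * otherwise map each page through mlx4_ib_umem_write_mtt().
 */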
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}

	if (!mr->umem->hugetlb ||
	    handle_hugetlb_user_mr(pd, mr, start, virt_addr, access_flags)) {
		n = ib_umem_page_count(mr->umem);
		shift = ilog2(mr->umem->page_size);

		err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
				    convert_access(access_flags), n, shift, &mr->mmr);
		if (err)
			goto err_umem;

		err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
		if (err)
			goto err_mr;
	}

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

	return &mr->ibmr;

err_mr:
	mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
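/* Free an MR and, if it was a userspace region, unpin its pages. */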
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);

	mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}
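/*
 * Allocate an MR for use with fast register work requests.  No
 * translation entries are written here; a later IB_WR_FAST_REG_MR
 * work request binds the MR to a page list.
 */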
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_page_list_len, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	mlx4_mr_free(dev->dev, &mr->mmr);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}
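/*
 * Allocate the page list consumed by a fast register work request.
 * The device DMAs the list directly, so a coherent mapped copy is kept
 * in addition to the caller-visible array; the hardware appears to
 * require 64-byte alignment, hence the WARN_ON on the low bits of the
 * mapping.
 */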
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof (u64);

	if (page_list_len > MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

	WARN_ON(mfrpl->map & 0x3f);

	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}
void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx4_ib_dev *dev = to_mdev(page_list->device);
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	int size = page_list->max_page_list_len * sizeof (u64);

	dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}
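/*
 * Allocate an FMR, a region designed to be remapped repeatedly without
 * firmware involvement; fmr_attr bounds its maximum page count, remap
 * count, and page size.
 */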
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
			     fmr_attr->max_pages, fmr_attr->max_maps,
			     fmr_attr->page_shift, &fmr->mfmr);
	if (err)
		goto err_free;

	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
	if (err)
		goto err_mr;

	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

	return &fmr->ibfmr;

err_mr:
	mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
	kfree(fmr);

	return ERR_PTR(err);
}
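/*
 * Remap an FMR onto a new page list; the mlx4 core updates the lkey
 * and rkey in place through the pointers passed here.
 */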
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}
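/*
 * Unmap a list of FMRs.  All entries must belong to one device: the
 * keys are invalidated per FMR, then a single SYNC_TPT command flushes
 * the device's translation state for all of them at once.
 */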
int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *ibfmr;
	int err;
	struct mlx4_dev *mdev = NULL;

	list_for_each_entry(ibfmr, fmr_list, list) {
		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
			return -EINVAL;
		mdev = to_mdev(ibfmr->device)->dev;
	}

	if (!mdev)
		return 0;

	list_for_each_entry(ibfmr, fmr_list, list) {
		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
	}

	/*
	 * Make sure all MPT status updates are visible before issuing
	 * SYNC_TPT firmware command.
	 */
	wmb();

	err = mlx4_SYNC_TPT(mdev);
	if (err)
		printk(KERN_WARNING "mlx4_ib: SYNC_TPT error %d when "
		       "unmapping FMRs\n", err);

	return 0;
}
int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
	int err;

	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

	if (!err)
		kfree(ifmr);

	return err;
}