/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mlx4_ib.h"

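/*
 * Translate IB verbs access flags into mlx4 MPT permission bits.
 * Local read access is always granted.
 */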
static u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
               MLX4_PERM_LOCAL_READ;
}

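/*
 * Allocate a DMA memory region covering the whole address space
 * (start 0, length ~0ull) on the given protection domain.
 */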
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx4_ib_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
                            ~0ull, convert_access(acc), 0, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_mr:
        mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

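/*
 * Walk the scatter/gather chunks of a pinned userspace region and
 * write the DMA addresses of its pages into the MTT, batching the
 * entries one page's worth at a time.
 */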
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
                           struct ib_umem *umem)
{
        u64 *pages;
        struct ib_umem_chunk *chunk;
        int i, j, k;
        int n;
        int len;
        int err = 0;

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        i = n = 0;

        list_for_each_entry(chunk, &umem->chunk_list, list)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = sg_dma_address(&chunk->page_list[j]) +
                                        umem->page_size * k;
                                /*
                                 * Be friendly to mlx4_write_mtt() and
                                 * pass it chunks of appropriate size.
                                 */
                                if (i == PAGE_SIZE / sizeof (u64)) {
                                        err = mlx4_write_mtt(dev->dev, mtt, n,
                                                             i, pages);
                                        if (err)
                                                goto out;
                                        n += i;
                                        i = 0;
                                }
                        }
                }

        if (i)
                err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
        free_page((unsigned long) pages);
        return err;
}

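/*
 * Try to register a userspace region backed by huge pages using
 * HPAGE_SIZE MTT entries.  Fails with -EINVAL if the region does not
 * break down into physically contiguous huge-page-sized blocks, and
 * with -ENOSYS when huge page support is not available.
 */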
static int handle_hugetlb_user_mr(struct ib_pd *pd, struct mlx4_ib_mr *mr,
                                  u64 start, u64 virt_addr, int access_flags)
{
#if defined(CONFIG_HUGETLB_PAGE) && !defined(__powerpc__) && !defined(__ia64__)
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct ib_umem_chunk *chunk;
        unsigned dsize;
        dma_addr_t daddr;
        unsigned cur_size = 0;
        dma_addr_t uninitialized_var(cur_addr);
        int n;
        struct ib_umem  *umem = mr->umem;
        u64 *arr;
        int err = 0;
        int i;
        int j = 0;
        int off = start & (HPAGE_SIZE - 1);

        n = DIV_ROUND_UP(off + umem->length, HPAGE_SIZE);
        arr = kmalloc(n * sizeof *arr, GFP_KERNEL);
        if (!arr)
                return -ENOMEM;

        list_for_each_entry(chunk, &umem->chunk_list, list)
                for (i = 0; i < chunk->nmap; ++i) {
                        daddr = sg_dma_address(&chunk->page_list[i]);
                        dsize = sg_dma_len(&chunk->page_list[i]);
                        if (!cur_size) {
                                cur_addr = daddr;
                                cur_size = dsize;
                        } else if (cur_addr + cur_size != daddr) {
                                err = -EINVAL;
                                goto out;
                        } else
                                cur_size += dsize;

                        if (cur_size > HPAGE_SIZE) {
                                err = -EINVAL;
                                goto out;
                        } else if (cur_size == HPAGE_SIZE) {
                                cur_size = 0;
                                arr[j++] = cur_addr;
                        }
                }

        if (cur_size) {
                arr[j++] = cur_addr;
        }

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, umem->length,
                            convert_access(access_flags), n, HPAGE_SHIFT, &mr->mmr);
        if (err)
                goto out;

        err = mlx4_write_mtt(dev->dev, &mr->mmr.mtt, 0, n, arr);

out:
        kfree(arr);
        return err;
#else
        return -ENOSYS;
#endif
}

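/*
 * Register a userspace memory region: pin the pages, then build the MR
 * either from huge-page MTT entries (when the region is backed by huge
 * pages) or from regular page-sized entries.
 */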
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int shift;
        int err;
        int n;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = ib_umem_get(pd->uobject->context, start, length,
                               access_flags, 0);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
        }

        if (!mr->umem->hugetlb ||
            handle_hugetlb_user_mr(pd, mr, start, virt_addr, access_flags)) {
                n = ib_umem_page_count(mr->umem);
                shift = ilog2(mr->umem->page_size);

                err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                                    convert_access(access_flags), n, shift, &mr->mmr);
                if (err)
                        goto err_umem;

                err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
                if (err)
                        goto err_mr;
        }

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

        return &mr->ibmr;

err_mr:
        mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
        ib_umem_release(mr->umem);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

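/* Destroy a memory region and release its pinned userspace pages, if any. */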
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);

        mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        return 0;
}

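/*
 * Allocate an MR for fast register work requests; the translation is
 * supplied later through a fast register page list of up to
 * max_page_list_len pages.
 */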
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
                                        int max_page_list_len)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
                            max_page_list_len, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_mr:
        mlx4_mr_free(dev->dev, &mr->mmr);

err_free:
        kfree(mr);
        return ERR_PTR(err);
}

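/*
 * Allocate a page list for fast register work requests, including a
 * DMA-coherent copy of the list laid out for the device.
 */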
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
                                                               int page_list_len)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_fast_reg_page_list *mfrpl;
        int size = page_list_len * sizeof (u64);

        if (page_list_len > MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);

        mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
        if (!mfrpl)
                return ERR_PTR(-ENOMEM);

        mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
        if (!mfrpl->ibfrpl.page_list)
                goto err_free;

        mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
                                                     size, &mfrpl->map,
                                                     GFP_KERNEL);
        if (!mfrpl->mapped_page_list)
                goto err_free;

        WARN_ON(mfrpl->map & 0x3f);

        return &mfrpl->ibfrpl;

err_free:
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
        return ERR_PTR(-ENOMEM);
}

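/* Free a fast register page list and its DMA-coherent copy. */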
void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
        struct mlx4_ib_dev *dev = to_mdev(page_list->device);
        struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
        int size = page_list->max_page_list_len * sizeof (u64);

        dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
                          mfrpl->map);
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
}

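/*
 * Allocate a fast memory region (FMR) with the requested page size,
 * maximum page count, and remap limit.
 */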
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
                                 struct ib_fmr_attr *fmr_attr)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_fmr *fmr;
        int err = -ENOMEM;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
                             fmr_attr->max_pages, fmr_attr->max_maps,
                             fmr_attr->page_shift, &fmr->mfmr);
        if (err)
                goto err_free;

        err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
        if (err)
                goto err_mr;

        fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

        return &fmr->ibfmr;

err_mr:
        mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
        kfree(fmr);

        return ERR_PTR(err);
}

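/* Map a list of physical pages into an FMR starting at the given I/O virtual address. */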
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                         int npages, u64 iova)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

        return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
                                 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

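/*
 * Unmap a list of FMRs, which must all belong to the same device, then
 * flush the stale translations with a single SYNC_TPT firmware command.
 */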
int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *ibfmr;
        int err;
        struct mlx4_dev *mdev = NULL;

        list_for_each_entry(ibfmr, fmr_list, list) {
                if (mdev && to_mdev(ibfmr->device)->dev != mdev)
                        return -EINVAL;
                mdev = to_mdev(ibfmr->device)->dev;
        }

        if (!mdev)
                return 0;

        list_for_each_entry(ibfmr, fmr_list, list) {
                struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

                mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
        }

        /*
         * Make sure all MPT status updates are visible before issuing
         * SYNC_TPT firmware command.
         */
        wmb();

        err = mlx4_SYNC_TPT(mdev);
        if (err)
                printk(KERN_WARNING "mlx4_ib: SYNC_TPT error %d when "
                       "unmapping FMRs\n", err);

        return 0;
}

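/* Free the FMR's hardware resources and, on success, its driver structure. */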
int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
        int err;

        err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

        if (!err)
                kfree(ifmr);

        return err;
}