/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#ifdef __linux__
#include <linux/hugetlb.h>
#endif
#include <linux/dma-attrs.h>

#include <sys/priv.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>

#include "uverbs.h"

static int allow_weak_ordering;
module_param(allow_weak_ordering, bool, 0444);
MODULE_PARM_DESC(allow_weak_ordering, "Allow weak ordering for data registered memory");

#define IB_UMEM_MAX_PAGE_CHUNK                                          \
        ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /      \
         ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -        \
          (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
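
/*
 * Worked example (illustrative only, with assumed sizes; not from the
 * original source): on a system with a 4096-byte PAGE_SIZE, if
 * offsetof(struct ib_umem_chunk, page_list) were 64 bytes and one
 * struct scatterlist entry were 32 bytes, the macro above would
 * evaluate to (4096 - 64) / 32 = 126 entries per chunk, so a chunk
 * header plus its scatterlist array always fits in a single page.
 * The actual sizes depend on the architecture and configuration.
 */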

#ifdef __ia64__
extern int dma_map_sg_hp_wa;

static int dma_map_sg_ia64(struct ib_device *ibdev,
                           struct scatterlist *sg,
                           int nents,
                           enum dma_data_direction dir)
{
        int i, rc, j, lents = 0;
        struct device *dev;

        if (!dma_map_sg_hp_wa)
                return ib_dma_map_sg(ibdev, sg, nents, dir);

        dev = ibdev->dma_device;
        for (i = 0; i < nents; ++i) {
                rc = dma_map_sg(dev, sg + i, 1, dir);
                if (rc <= 0) {
                        for (j = 0; j < i; ++j)
                                dma_unmap_sg(dev, sg + j, 1, dir);

                        return 0;
                }
                lents += rc;
        }

        return lents;
}

static void dma_unmap_sg_ia64(struct ib_device *ibdev,
                              struct scatterlist *sg,
                              int nents,
                              enum dma_data_direction dir)
{
        int i;
        struct device *dev;

        if (!dma_map_sg_hp_wa)
                return ib_dma_unmap_sg(ibdev, sg, nents, dir);

        dev = ibdev->dma_device;
        for (i = 0; i < nents; ++i)
                dma_unmap_sg(dev, sg + i, 1, dir);
}

#define ib_dma_map_sg(dev, sg, nents, dir) dma_map_sg_ia64(dev, sg, nents, dir)
#define ib_dma_unmap_sg(dev, sg, nents, dir) dma_unmap_sg_ia64(dev, sg, nents, dir)

#endif

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
#ifdef __linux__
        struct ib_umem_chunk *chunk, *tmp;
        int i;

        list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
                ib_dma_unmap_sg_attrs(dev, chunk->page_list,
                                      chunk->nents, DMA_BIDIRECTIONAL, &chunk->attrs);
                for (i = 0; i < chunk->nents; ++i) {
                        struct page *page = sg_page(&chunk->page_list[i]);
                        if (umem->writable && dirty)
                                set_page_dirty_lock(page);
                        put_page(page);
                }
                kfree(chunk);
        }
#else
        struct ib_umem_chunk *chunk, *tmp;
        vm_object_t object;
        int i;

        object = NULL;
        list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
                ib_dma_unmap_sg_attrs(dev, chunk->page_list,
                                      chunk->nents, DMA_BIDIRECTIONAL, &chunk->attrs);
                for (i = 0; i < chunk->nents; ++i) {
                        struct page *page = sg_page(&chunk->page_list[i]);
                        if (umem->writable && dirty) {
                                if (object && object != page->object)
                                        VM_OBJECT_WUNLOCK(object);
                                if (object != page->object) {
                                        object = page->object;
                                        VM_OBJECT_WLOCK(object);
                                }
                                vm_page_dirty(page);
                        }
                }
                kfree(chunk);
        }
        if (object)
                VM_OBJECT_WUNLOCK(object);

#endif
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                            size_t size, int access, int dmasync)
{
#ifdef __linux__
        struct ib_umem *umem;
        struct page **page_list;
        struct vm_area_struct **vma_list;
        struct ib_umem_chunk *chunk;
        unsigned long locked;
        unsigned long lock_limit;
        unsigned long cur_base;
        unsigned long npages;
        int ret;
        int off;
        int i;
        DEFINE_DMA_ATTRS(attrs);

        if (dmasync)
                dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
        else if (allow_weak_ordering)
                dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);

        if (!can_do_mlock())
                return ERR_PTR(-EPERM);

        umem = kmalloc(sizeof *umem, GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);

        umem->context   = context;
        umem->length    = size;
        umem->offset    = addr & ~PAGE_MASK;
        umem->page_size = PAGE_SIZE;
        /*
         * We ask for writable memory if any access flags other than
         * "remote read" are set.  "Local write" and "remote write"
         * obviously require write access.  "Remote atomic" can do
         * things like fetch and add, which will modify memory, and
         * "MW bind" can change permissions by binding a window.
         */
        umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);

        /* We assume the memory is from hugetlb until proved otherwise */
        umem->hugetlb   = 1;

        INIT_LIST_HEAD(&umem->chunk_list);

        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list) {
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }

        /*
         * if we can't alloc the vma_list, it's not so bad;
         * just assume the memory is not hugetlb memory
         */
        vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
        if (!vma_list)
                umem->hugetlb = 0;

        npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;

        down_write(&current->mm->mmap_sem);

        locked     = npages + current->mm->locked_vm;
        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;

        if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
                ret = -ENOMEM;
                goto out;
        }

        cur_base = addr & PAGE_MASK;

        ret = 0;

        while (npages) {
                ret = get_user_pages(current, current->mm, cur_base,
                                     min_t(unsigned long, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
                                     1, !umem->writable, page_list, vma_list);

                if (ret < 0)
                        goto out;

                cur_base += ret * PAGE_SIZE;
                npages   -= ret;

                off = 0;

                while (ret) {
                        chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
                                        min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
                                        GFP_KERNEL);
                        if (!chunk) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        chunk->attrs = attrs;
                        chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
                        sg_init_table(chunk->page_list, chunk->nents);
                        for (i = 0; i < chunk->nents; ++i) {
                                if (vma_list &&
                                    !is_vm_hugetlb_page(vma_list[i + off]))
                                        umem->hugetlb = 0;
                                sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
                        }

                        chunk->nmap = ib_dma_map_sg_attrs(context->device,
                                                          &chunk->page_list[0],
                                                          chunk->nents,
                                                          DMA_BIDIRECTIONAL,
                                                          &attrs);
                        if (chunk->nmap <= 0) {
                                for (i = 0; i < chunk->nents; ++i)
                                        put_page(sg_page(&chunk->page_list[i]));
                                kfree(chunk);

                                ret = -ENOMEM;
                                goto out;
                        }

                        ret -= chunk->nents;
                        off += chunk->nents;
                        list_add_tail(&chunk->list, &umem->chunk_list);
                }

                ret = 0;
        }

out:
        if (ret < 0) {
                __ib_umem_release(context->device, umem, 0);
                kfree(umem);
        } else
                current->mm->locked_vm = locked;

        up_write(&current->mm->mmap_sem);
        if (vma_list)
                free_page((unsigned long) vma_list);
        free_page((unsigned long) page_list);

        return ret < 0 ? ERR_PTR(ret) : umem;
#else
        struct ib_umem *umem;
        struct ib_umem_chunk *chunk;
        struct proc *proc;
        pmap_t pmap;
        vm_offset_t end, last, start;
        vm_size_t npages;
        int error;
        int ents;
        int ret;
        int i;
        DEFINE_DMA_ATTRS(attrs);

        error = priv_check(curthread, PRIV_VM_MLOCK);
        if (error)
                return ERR_PTR(-error);

        last = addr + size;
        start = addr & PAGE_MASK; /* Use the linux PAGE_MASK definition. */
        end = roundup2(last, PAGE_SIZE); /* Use PAGE_MASK safe operation. */
        if (last < addr || end < addr)
                return ERR_PTR(-EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
                return ERR_PTR(-ENOMEM);
        umem = kzalloc(sizeof *umem, GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);
        proc = curthread->td_proc;
        PROC_LOCK(proc);
        if (ptoa(npages +
            pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
            lim_cur(proc, RLIMIT_MEMLOCK)) {
                PROC_UNLOCK(proc);
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }
        PROC_UNLOCK(proc);
        if (npages + cnt.v_wire_count > vm_page_max_wired) {
                kfree(umem);
                return ERR_PTR(-EAGAIN);
        }
        /*
         * We ask for writable memory if any access flags other than
         * "remote read" are set.  "Local write" and "remote write"
         * obviously require write access.  "Remote atomic" can do
         * things like fetch and add, which will modify memory, and
         * "MW bind" can change permissions by binding a window.
         * Compute this before wiring so that writable regions are
         * actually wired with write access; the original code set it
         * after vm_map_wire(), so VM_MAP_WIRE_WRITE was never passed.
         */
        umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);
        error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES |
            (umem->writable ? VM_MAP_WIRE_WRITE : 0));
        if (error != KERN_SUCCESS) {
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }

        umem->context   = context;
        umem->length    = size;
        umem->offset    = addr & ~PAGE_MASK;
        umem->page_size = PAGE_SIZE;
        umem->start     = addr;
        umem->hugetlb = 0;
        INIT_LIST_HEAD(&umem->chunk_list);

        pmap = vm_map_pmap(&proc->p_vmspace->vm_map);
        ret = 0;
        while (npages) {
                ents = min_t(int, npages, IB_UMEM_MAX_PAGE_CHUNK);
                chunk = kmalloc(sizeof(*chunk) +
                                (sizeof(struct scatterlist) * ents),
                                GFP_KERNEL);
                if (!chunk) {
                        ret = -ENOMEM;
                        goto out;
                }

                chunk->attrs = attrs;
                chunk->nents = ents;
                sg_init_table(&chunk->page_list[0], ents);
                for (i = 0; i < chunk->nents; ++i) {
                        vm_paddr_t pa;

                        pa = pmap_extract(pmap, start);
                        if (pa == 0) {
                                ret = -ENOMEM;
                                kfree(chunk);
                                goto out;
                        }
                        sg_set_page(&chunk->page_list[i], PHYS_TO_VM_PAGE(pa),
                            PAGE_SIZE, 0);
                        npages--;
                        start += PAGE_SIZE;
                }

                chunk->nmap = ib_dma_map_sg_attrs(context->device,
                                                  &chunk->page_list[0],
                                                  chunk->nents,
                                                  DMA_BIDIRECTIONAL,
                                                  &attrs);
                if (chunk->nmap != chunk->nents) {
                        kfree(chunk);
                        ret = -ENOMEM;
                        goto out;
                }

                list_add_tail(&chunk->list, &umem->chunk_list);
        }

out:
        if (ret < 0) {
                __ib_umem_release(context->device, umem, 0);
                kfree(umem);
        }

        return ret < 0 ? ERR_PTR(ret) : umem;
#endif
}
EXPORT_SYMBOL(ib_umem_get);
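
/*
 * Usage sketch (illustrative only; not part of the original file): a
 * verbs driver typically pins the user buffer in its memory-registration
 * path and releases it on deregistration.  The variable names below are
 * hypothetical.
 *
 *      umem = ib_umem_get(context, (unsigned long)buf, len,
 *          IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ, 0);
 *      if (IS_ERR(umem))
 *              return PTR_ERR(umem);
 *      ... program the HCA translation tables from umem->chunk_list ...
 *      ib_umem_release(umem);          // on dereg_mr or error unwind
 */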

#ifdef __linux__
static void ib_umem_account(struct work_struct *work)
{
        struct ib_umem *umem = container_of(work, struct ib_umem, work);

        down_write(&umem->mm->mmap_sem);
        umem->mm->locked_vm -= umem->diff;
        up_write(&umem->mm->mmap_sem);
        mmput(umem->mm);
        kfree(umem);
}
#endif

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
#ifdef __linux__
        struct ib_ucontext *context = umem->context;
        struct mm_struct *mm;
        unsigned long diff;

        __ib_umem_release(umem->context->device, umem, 1);

        mm = get_task_mm(current);
        if (!mm) {
                kfree(umem);
                return;
        }

        diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;

        /*
         * We may be called with the mm's mmap_sem already held.  This
         * can happen when a userspace munmap() is the call that drops
         * the last reference to our file and calls our release
         * method.  If there are memory regions to destroy, we'll end
         * up here and not be able to take the mmap_sem.  In that case
         * we defer the vm_locked accounting to the system workqueue.
         */
        if (context->closing) {
                if (!down_write_trylock(&mm->mmap_sem)) {
                        INIT_WORK(&umem->work, ib_umem_account);
                        umem->mm   = mm;
                        umem->diff = diff;

                        schedule_work(&umem->work);
                        return;
                }
        } else
                down_write(&mm->mmap_sem);

        current->mm->locked_vm -= diff;
        up_write(&mm->mmap_sem);
        mmput(mm);
#else
        vm_offset_t addr, end, last, start;
        vm_size_t size;
        int error;

        __ib_umem_release(umem->context->device, umem, 1);
        if (umem->context->closing) {
                kfree(umem);
                return;
        }
        error = priv_check(curthread, PRIV_VM_MUNLOCK);
        if (error) {
                /* Don't leak the umem struct if the privilege check fails. */
                kfree(umem);
                return;
        }
        addr = umem->start;
        size = umem->length;
        last = addr + size;
        start = addr & PAGE_MASK; /* Use the linux PAGE_MASK definition. */
        end = roundup2(last, PAGE_SIZE); /* Use PAGE_MASK safe operation. */
        vm_map_unwire(&curthread->td_proc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
#endif
        kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
        struct ib_umem_chunk *chunk;
        int shift;
        int i;
        int n;

        shift = ilog2(umem->page_size);

        n = 0;
        list_for_each_entry(chunk, &umem->chunk_list, list)
                for (i = 0; i < chunk->nmap; ++i)
                        n += sg_dma_len(&chunk->page_list[i]) >> shift;

        return n;
}
EXPORT_SYMBOL(ib_umem_page_count);
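
/*
 * Illustrative sketch (assumed names; not part of the original file):
 * drivers commonly size a page array with ib_umem_page_count() and then
 * walk the chunk list to collect DMA addresses, e.g. (ignoring mapped
 * entries longer than one page for brevity):
 *
 *      struct ib_umem_chunk *chunk;
 *      int i, n = 0;
 *      u64 *pages;     // array of ib_umem_page_count(umem) entries
 *
 *      list_for_each_entry(chunk, &umem->chunk_list, list)
 *              for (i = 0; i < chunk->nmap; ++i)
 *                      pages[n++] = sg_dma_address(&chunk->page_list[i]);
 */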

/**********************************************/
/*
 * Stub functions for contiguous pages -
 * we currently do not support this feature.
 */
/**********************************************/

/**
 * ib_cmem_release_contiguous_pages - release memory allocated by
 *                                    ib_cmem_alloc_contiguous_pages.
 * @cmem: cmem struct to release
 */
void ib_cmem_release_contiguous_pages(struct ib_cmem *cmem)
{
}
EXPORT_SYMBOL(ib_cmem_release_contiguous_pages);

/**
 * ib_cmem_alloc_contiguous_pages - allocate contiguous pages
 * @context: userspace context to allocate memory for
 * @total_size: total required size for that allocation.
 * @page_size_order: order of one contiguous page.
 */
struct ib_cmem *ib_cmem_alloc_contiguous_pages(struct ib_ucontext *context,
                                               unsigned long total_size,
                                               unsigned long page_size_order)
{
        return NULL;
}
EXPORT_SYMBOL(ib_cmem_alloc_contiguous_pages);

/**
 * ib_cmem_map_contiguous_pages_to_vma - map contiguous pages into VMA
 * @ib_cmem: cmem structure returned by ib_cmem_alloc_contiguous_pages
 * @vma: VMA to inject pages into.
 */
int ib_cmem_map_contiguous_pages_to_vma(struct ib_cmem *ib_cmem,
                                        struct vm_area_struct *vma)
{
        return 0;
}
EXPORT_SYMBOL(ib_cmem_map_contiguous_pages_to_vma);