/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#ifdef __linux__
#include <linux/hugetlb.h>
#endif
#include <linux/dma-attrs.h>

#include <sys/priv.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>
static int allow_weak_ordering;
module_param(allow_weak_ordering, bool, 0444);
MODULE_PARM_DESC(allow_weak_ordering, "Allow weak ordering for data registered memory");
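
/*
 * IB_UMEM_MAX_PAGE_CHUNK is the number of scatterlist entries that fit in
 * one page together with the struct ib_umem_chunk header: the space left
 * after the header is divided by the size of one page_list element
 * (computed as the distance between element 1 and element 0).
 */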
#define IB_UMEM_MAX_PAGE_CHUNK                                          \
        ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /      \
         ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -        \
          (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
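
/*
 * On ia64, when the dma_map_sg_hp_wa workaround is enabled, the wrappers
 * below map and unmap each scatterlist entry individually instead of
 * handing the whole list to dma_map_sg() in one call; otherwise they fall
 * through to the regular ib_dma_map_sg()/ib_dma_unmap_sg() helpers.
 */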
#ifdef __ia64__
extern int dma_map_sg_hp_wa;

static int dma_map_sg_ia64(struct ib_device *ibdev,
                           struct scatterlist *sg,
                           int nents,
                           enum dma_data_direction dir)
{
        int i, rc, j, lents = 0;
        struct device *dev;

        if (!dma_map_sg_hp_wa)
                return ib_dma_map_sg(ibdev, sg, nents, dir);

        dev = ibdev->dma_device;
        for (i = 0; i < nents; ++i) {
                rc = dma_map_sg(dev, sg + i, 1, dir);
                if (rc <= 0) {
                        /* Undo the entries mapped so far and report failure. */
                        for (j = 0; j < i; ++j)
                                dma_unmap_sg(dev, sg + j, 1, dir);
                        return 0;
                }
                lents += rc;
        }
        return lents;
}

static void dma_unmap_sg_ia64(struct ib_device *ibdev,
                              struct scatterlist *sg,
                              int nents,
                              enum dma_data_direction dir)
{
        int i;
        struct device *dev;

        if (!dma_map_sg_hp_wa)
                return ib_dma_unmap_sg(ibdev, sg, nents, dir);

        dev = ibdev->dma_device;
        for (i = 0; i < nents; ++i)
                dma_unmap_sg(dev, sg + i, 1, dir);
}
#define ib_dma_map_sg(dev, sg, nents, dir) dma_map_sg_ia64(dev, sg, nents, dir)
#define ib_dma_unmap_sg(dev, sg, nents, dir) dma_unmap_sg_ia64(dev, sg, nents, dir)
#endif

#ifdef __linux__
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
        struct ib_umem_chunk *chunk, *tmp;
        int i;

        list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
                ib_dma_unmap_sg_attrs(dev, chunk->page_list,
                                      chunk->nents, DMA_BIDIRECTIONAL, &chunk->attrs);
                for (i = 0; i < chunk->nents; ++i) {
                        struct page *page = sg_page(&chunk->page_list[i]);
                        if (umem->writable && dirty)
                                set_page_dirty_lock(page);
                        put_page(page);
                }
                kfree(chunk);
        }
}
#else
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
        struct ib_umem_chunk *chunk, *tmp;
        vm_object_t object;
        int i;

        object = NULL;
        list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
                ib_dma_unmap_sg_attrs(dev, chunk->page_list,
                    chunk->nents, DMA_BIDIRECTIONAL, &chunk->attrs);
                for (i = 0; i < chunk->nents; ++i) {
                        struct page *page = sg_page(&chunk->page_list[i]);
                        if (umem->writable && dirty) {
                                if (object && object != page->object)
                                        VM_OBJECT_WUNLOCK(object);
                                if (object != page->object) {
                                        object = page->object;
                                        VM_OBJECT_WLOCK(object);
                                }
                                vm_page_dirty(page);
                        }
                }
                kfree(chunk);
        }
        if (object)
                VM_OBJECT_WUNLOCK(object);
}
#endif

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                            size_t size, int access, int dmasync)
{
#ifdef __linux__
        struct ib_umem *umem;
        struct page **page_list;
        struct vm_area_struct **vma_list;
        struct ib_umem_chunk *chunk;
        unsigned long locked;
        unsigned long lock_limit;
        unsigned long cur_base;
        unsigned long npages;
        int ret, off, i;
        DEFINE_DMA_ATTRS(attrs);

        if (dmasync)
                dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
        else if (allow_weak_ordering)
                dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);

        if (!can_do_mlock())
                return ERR_PTR(-EPERM);
        umem = kmalloc(sizeof *umem, GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);

        umem->context   = context;
        umem->length    = size;
        umem->offset    = addr & ~PAGE_MASK;
        umem->page_size = PAGE_SIZE;
        /*
         * We ask for writable memory if any access flags other than
         * "remote read" are set.  "Local write" and "remote write"
         * obviously require write access.  "Remote atomic" can do
         * things like fetch and add, which will modify memory, and
         * "MW bind" can change permissions by binding a window.
         */
        umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);
        /* We assume the memory is from hugetlb until proved otherwise */
        umem->hugetlb   = 1;

        INIT_LIST_HEAD(&umem->chunk_list);

        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list) {
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }
        /*
         * if we can't alloc the vma_list, it's not so bad;
         * just assume the memory is not hugetlb memory
         */
        vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
        if (!vma_list)
                umem->hugetlb = 0;

        npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;

        down_write(&current->mm->mmap_sem);

        locked     = npages + current->mm->locked_vm;
        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
        if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
                ret = -ENOMEM;
                goto out;
        }

        cur_base = addr & PAGE_MASK;
        ret = 0;

        while (npages) {
                ret = get_user_pages(current, current->mm, cur_base,
                                     min_t(unsigned long, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
                                     1, !umem->writable, page_list, vma_list);
                if (ret < 0)
                        goto out;

                cur_base += ret * PAGE_SIZE;
                npages   -= ret;
                off = 0;

                while (ret) {
                        chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
                                        min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
                                        GFP_KERNEL);
                        if (!chunk) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        chunk->attrs = attrs;
                        chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
                        sg_init_table(chunk->page_list, chunk->nents);
                        for (i = 0; i < chunk->nents; ++i) {
                                if (vma_list &&
                                    !is_vm_hugetlb_page(vma_list[i + off]))
                                        umem->hugetlb = 0;
                                sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
                        }

                        chunk->nmap = ib_dma_map_sg_attrs(context->device,
                                                          &chunk->page_list[0],
                                                          chunk->nents,
                                                          DMA_BIDIRECTIONAL,
                                                          &attrs);
                        if (chunk->nmap <= 0) {
                                for (i = 0; i < chunk->nents; ++i)
                                        put_page(sg_page(&chunk->page_list[i]));
                                kfree(chunk);
                                ret = -ENOMEM;
                                goto out;
                        }

                        ret -= chunk->nents;
                        off += chunk->nents;
                        list_add_tail(&chunk->list, &umem->chunk_list);
                }

                ret = 0;
        }

out:
        if (ret < 0) {
                __ib_umem_release(context->device, umem, 0);
                kfree(umem);
        } else
                current->mm->locked_vm = locked;

        up_write(&current->mm->mmap_sem);
        if (vma_list)
                free_page((unsigned long) vma_list);
        free_page((unsigned long) page_list);

        return ret < 0 ? ERR_PTR(ret) : umem;
#else
        struct ib_umem *umem;
        struct ib_umem_chunk *chunk;
        struct proc *proc;
        pmap_t pmap;
        vm_offset_t end, last, start;
        vm_size_t npages;
        int error, ents, ret, i;
        DEFINE_DMA_ATTRS(attrs);

        error = priv_check(curthread, PRIV_VM_MLOCK);
        if (error)
                return ERR_PTR(-error);
        last = addr + size;
        start = addr & PAGE_MASK; /* Use the linux PAGE_MASK definition. */
        end = roundup2(last, PAGE_SIZE); /* Use PAGE_MASK safe operation. */
        if (last < addr || end < addr)
                return ERR_PTR(-EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
                return ERR_PTR(-ENOMEM);
        umem = kzalloc(sizeof *umem, GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);
        /*
         * We ask for writable memory if any access flags other than
         * "remote read" are set.  "Local write" and "remote write"
         * obviously require write access.  "Remote atomic" can do
         * things like fetch and add, which will modify memory, and
         * "MW bind" can change permissions by binding a window.
         * Set this before vm_map_wire() so that VM_MAP_WIRE_WRITE is
         * requested for writable mappings.
         */
        umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);
        proc = curthread->td_proc;
        PROC_LOCK(proc);
        if (ptoa(npages +
            pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
            lim_cur(proc, RLIMIT_MEMLOCK)) {
                PROC_UNLOCK(proc);
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }
        PROC_UNLOCK(proc);
        if (npages + cnt.v_wire_count > vm_page_max_wired) {
                kfree(umem);
                return ERR_PTR(-EAGAIN);
        }
        error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES |
            (umem->writable ? VM_MAP_WIRE_WRITE : 0));
        if (error != KERN_SUCCESS) {
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }

        umem->context   = context;
        umem->length    = size;
        umem->start     = addr;
        umem->offset    = addr & ~PAGE_MASK;
        umem->page_size = PAGE_SIZE;

        INIT_LIST_HEAD(&umem->chunk_list);

        pmap = vm_map_pmap(&proc->p_vmspace->vm_map);
        ret = 0;
        while (npages) {
                ents = min_t(int, npages, IB_UMEM_MAX_PAGE_CHUNK);
                chunk = kmalloc(sizeof(*chunk) +
                                (sizeof(struct scatterlist) * ents),
                                GFP_KERNEL);
                if (!chunk) {
                        ret = -ENOMEM;
                        goto out;
                }
                chunk->attrs = attrs;
                chunk->nents = ents;
                sg_init_table(&chunk->page_list[0], ents);
                for (i = 0; i < chunk->nents; ++i) {
                        vm_paddr_t pa;

                        pa = pmap_extract(pmap, start);
                        if (pa == 0) {
                                ret = -ENOMEM;
                                kfree(chunk);
                                goto out;
                        }
                        sg_set_page(&chunk->page_list[i], PHYS_TO_VM_PAGE(pa),
                            PAGE_SIZE, 0);
                        npages--;
                        start += PAGE_SIZE;
                }
                chunk->nmap = ib_dma_map_sg_attrs(context->device,
                    &chunk->page_list[0], chunk->nents,
                    DMA_BIDIRECTIONAL, &attrs);
                if (chunk->nmap != chunk->nents) {
                        kfree(chunk);
                        ret = -ENOMEM;
                        goto out;
                }
                list_add_tail(&chunk->list, &umem->chunk_list);
        }

out:
        if (ret < 0) {
                __ib_umem_release(context->device, umem, 0);
                kfree(umem);
        }
        return ret < 0 ? ERR_PTR(ret) : umem;
#endif
}
EXPORT_SYMBOL(ib_umem_get);
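
/*
 * Example (illustrative only): a verbs driver typically pins the user
 * buffer while registering a memory region and unpins it when the region
 * is deregistered.  The surrounding names (pd, start, length, access) are
 * hypothetical and not defined in this file:
 *
 *      umem = ib_umem_get(pd->uobject->context, start, length, access, 0);
 *      if (IS_ERR(umem))
 *              return PTR_ERR(umem);
 *      npages = ib_umem_page_count(umem);
 *      ...
 *      ib_umem_release(umem);
 */
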
#ifdef __linux__
static void ib_umem_account(struct work_struct *work)
{
        struct ib_umem *umem = container_of(work, struct ib_umem, work);

        down_write(&umem->mm->mmap_sem);
        umem->mm->locked_vm -= umem->diff;
        up_write(&umem->mm->mmap_sem);
        mmput(umem->mm);
        kfree(umem);
}
#endif

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
#ifdef __linux__
        struct ib_ucontext *context = umem->context;
        struct mm_struct *mm;
        unsigned long diff;

        __ib_umem_release(umem->context->device, umem, 1);

        mm = get_task_mm(current);
        if (!mm) {
                kfree(umem);
                return;
        }

        diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;

        /*
         * We may be called with the mm's mmap_sem already held.  This
         * can happen when a userspace munmap() is the call that drops
         * the last reference to our file and calls our release
         * method.  If there are memory regions to destroy, we'll end
         * up here and not be able to take the mmap_sem.  In that case
         * we defer the vm_locked accounting to the system workqueue.
         */
        if (context->closing) {
                if (!down_write_trylock(&mm->mmap_sem)) {
                        INIT_WORK(&umem->work, ib_umem_account);
                        umem->mm   = mm;
                        umem->diff = diff;
                        schedule_work(&umem->work);
                        return;
                }
        } else
                down_write(&mm->mmap_sem);

        current->mm->locked_vm -= diff;
        up_write(&mm->mmap_sem);
        mmput(mm);
        kfree(umem);
#else
        vm_offset_t addr, end, last, start;
        vm_size_t size;
        int error;

        __ib_umem_release(umem->context->device, umem, 1);
        if (umem->context->closing) {
                kfree(umem);
                return;
        }
        error = priv_check(curthread, PRIV_VM_MUNLOCK);
        if (error) {
                kfree(umem);
                return;
        }
        addr = umem->start;
        size = umem->length;
        last = addr + size;
        start = addr & PAGE_MASK; /* Use the linux PAGE_MASK definition. */
        end = roundup2(last, PAGE_SIZE); /* Use PAGE_MASK safe operation. */
        vm_map_unwire(&curthread->td_proc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
        kfree(umem);
#endif
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
        struct ib_umem_chunk *chunk;
        int shift, i, n;

        shift = ilog2(umem->page_size);
        n = 0;
        list_for_each_entry(chunk, &umem->chunk_list, list)
                for (i = 0; i < chunk->nmap; ++i)
                        n += sg_dma_len(&chunk->page_list[i]) >> shift;

        return n;
}
EXPORT_SYMBOL(ib_umem_page_count);
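
/*
 * Example (illustrative only): after ib_umem_get(), a driver usually walks
 * the mapped chunks to collect DMA addresses for its translation table.
 * "write_hw_entry" below is a hypothetical helper, not part of this file:
 *
 *      struct ib_umem_chunk *chunk;
 *      int i;
 *
 *      list_for_each_entry(chunk, &umem->chunk_list, list)
 *              for (i = 0; i < chunk->nmap; ++i)
 *                      write_hw_entry(sg_dma_address(&chunk->page_list[i]),
 *                                     sg_dma_len(&chunk->page_list[i]));
 */
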
/**********************************************/
/*
 * Stub functions for contiguous pages -
 * We currently do not support this feature
 */
/**********************************************/

/**
 * ib_cmem_release_contiguous_pages - release memory allocated by
 * ib_cmem_alloc_contiguous_pages.
 * @cmem: cmem struct to release
 */
void ib_cmem_release_contiguous_pages(struct ib_cmem *cmem)
{
}
EXPORT_SYMBOL(ib_cmem_release_contiguous_pages);

/**
 * ib_cmem_alloc_contiguous_pages - allocate contiguous pages
 * @context: userspace context to allocate memory for
 * @total_size: total required size for that allocation.
 * @page_size_order: order of one contiguous page.
 */
struct ib_cmem *ib_cmem_alloc_contiguous_pages(struct ib_ucontext *context,
                                               unsigned long total_size,
                                               unsigned long page_size_order)
{
        /* Contiguous pages are not supported; this is a stub. */
        return NULL;
}
EXPORT_SYMBOL(ib_cmem_alloc_contiguous_pages);

/**
 * ib_cmem_map_contiguous_pages_to_vma - map contiguous pages into VMA
 * @ib_cmem: cmem structure returned by ib_cmem_alloc_contiguous_pages
 * @vma: VMA to inject pages into.
 */
int ib_cmem_map_contiguous_pages_to_vma(struct ib_cmem *ib_cmem,
                                        struct vm_area_struct *vma)
{
        /* Contiguous pages are not supported; this is a stub. */
        return 0;
}
EXPORT_SYMBOL(ib_cmem_map_contiguous_pages_to_vma);