/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/compiler.h>
#include <linux/kref.h>
#include <linux/slab.h>

#include <rdma/ib_umem.h>

#include "mlx5_ib.h"
35 struct mlx5_ib_user_db_page {
36 struct list_head list;
42 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, uintptr_t virt,
45 struct mlx5_ib_user_db_page *page;
46 struct ib_umem_chunk *chunk;
49 mutex_lock(&context->db_page_mutex);
51 list_for_each_entry(page, &context->db_page_list, list)
52 if (page->user_virt == (virt & PAGE_MASK))
55 page = kmalloc(sizeof(*page), GFP_KERNEL);
61 page->user_virt = (virt & PAGE_MASK);
63 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
65 if (IS_ERR(page->umem)) {
66 err = PTR_ERR(page->umem);
71 list_add(&page->list, &context->db_page_list);
74 chunk = list_entry(page->umem->chunk_list.next,
75 struct ib_umem_chunk, list);
76 db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
77 db->u.user_page = page;
81 mutex_unlock(&context->db_page_mutex);
86 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
88 mutex_lock(&context->db_page_mutex);
90 if (!--db->u.user_page->refcnt) {
91 list_del(&db->u.user_page->list);
92 ib_umem_release(db->u.user_page->umem);
93 kfree(db->u.user_page);
96 mutex_unlock(&context->db_page_mutex);