/* SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0 */
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <linux/rbtree.h>

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
struct umem_odp_node {
	u64 __subtree_last;
	struct rb_node rb;
};

struct ib_umem_odp {
	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page		**page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate
	 * access permissions. See ODP_READ_ALLOWED_BIT and
	 * ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t		*dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex		umem_mutex;
	void			*private; /* for the HW driver to use. */

	/* When false, use the notifier counter in the ucontext struct. */
	bool mn_counters_active;
	int notifiers_seq;
	int notifiers_count;

	/* A linked list of umems that don't have private mmu notifier
	 * counters yet. */
	struct list_head no_private_counters;
	struct ib_umem		*umem;

	/* Tree tracking */
	struct umem_odp_node	interval_tree;

	struct completion	notifier_completion;
	int			dying;
};
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);

void ib_umem_odp_release(struct ib_umem *umem);
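
/*
 * Usage sketch (illustrative, not part of this header): ODP umems are
 * normally created through ib_umem_get() on a region registered with
 * IB_ACCESS_ON_DEMAND, which calls ib_umem_odp_get() internally;
 * ib_umem_odp_release() is likewise reached via ib_umem_release(). A
 * driver can recognize the ODP case by checking umem->odp_data:
 *
 *	umem = ib_umem_get(context, start, length, access, 0);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *	if (umem->odp_data)
 *		... on-demand paging path: pages are mapped lazily from
 *		    the page-fault handler via ib_umem_odp_map_dma_pages() ...
 */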
/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in a change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT (1ULL << 1)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
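
/*
 * Example (illustrative): unpacking an entry of dma_list into its bus
 * address and current permissions. "idx" is a hypothetical page index
 * within the umem:
 *
 *	dma_addr_t entry = umem->odp_data->dma_list[idx];
 *	dma_addr_t addr = entry & ODP_DMA_ADDR_MASK;
 *	bool can_read = entry & ODP_READ_ALLOWED_BIT;
 *	bool can_write = entry & ODP_WRITE_ALLOWED_BIT;
 */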
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
			      u64 access_mask, unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
				 u64 bound);

void rbt_ib_umem_insert(struct umem_odp_node *node, struct rb_root *root);
void rbt_ib_umem_remove(struct umem_odp_node *node, struct rb_root *root);
typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
			      void *cookie);
/*
 * Call the callback on each ib_umem in the range. Returns the logical or of
 * the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end,
				  umem_call_back cb, void *cookie);
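
/*
 * Example (illustrative): an invalidation pass over every umem that
 * intersects [start, end) from an mmu notifier callback. The callback
 * name is hypothetical:
 *
 *	static int invalidate_range_cb(struct ib_umem *item, u64 start,
 *				       u64 end, void *cookie)
 *	{
 *		ib_umem_odp_unmap_dma_pages(item, start, end);
 *		return 0;
 *	}
 *
 *	rbt_ib_umem_for_each_in_range(root, start, end,
 *				      invalidate_range_cb, NULL);
 */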
struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root,
					     u64 start, u64 last);
struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node,
					    u64 start, u64 last);
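
/*
 * Example (illustrative): manual iteration over the interval tree,
 * equivalent to what rbt_ib_umem_for_each_in_range() does internally.
 * Each node is embedded in an ib_umem_odp as its interval_tree member,
 * so the owning umem can be recovered with container_of():
 *
 *	struct umem_odp_node *node;
 *
 *	for (node = rbt_ib_umem_iter_first(root, start, last); node;
 *	     node = rbt_ib_umem_iter_next(node, start, last))
 *		... process the umem containing node ...
 */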
static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
					     unsigned long mmu_seq)
{
	/*
	 * This code is strongly based on the KVM code from
	 * mmu_notifier_retry. Should be called with
	 * the relevant locks taken (item->odp_data->umem_mutex
	 * and the ucontext umem_rwsem semaphore locked for read).
	 */

	/* Do not allow page faults while the new ib_umem hasn't seen a state
	 * with zero notifiers yet, and doesn't have its own valid set of
	 * private counters. */
	if (!item->odp_data->mn_counters_active)
		return 1;

	if (unlikely(item->odp_data->notifiers_count))
		return 1;
	if (item->odp_data->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}
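
/*
 * Example (illustrative): the KVM-style retry sequence a page-fault
 * handler is expected to follow. Sample the notifier sequence number
 * first, map the pages, then re-check under umem_mutex before
 * installing device mappings; on retry, the fault is simply raised
 * again:
 *
 *	unsigned long current_seq;
 *
 *	current_seq = item->odp_data->notifiers_seq;
 *	ret = ib_umem_odp_map_dma_pages(item, start, bcnt, access_mask,
 *					current_seq);
 *	mutex_lock(&item->odp_data->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(item, current_seq))
 *		... update the device page tables from dma_list ...
 *	mutex_unlock(&item->odp_data->umem_mutex);
 */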
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline int ib_umem_odp_get(struct ib_ucontext *context,
				  struct ib_umem *umem)
{
	return -EINVAL;
}
static inline void ib_umem_odp_release(struct ib_umem *umem) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */