/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
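
/*
 * A usage sketch (variable names hypothetical): after pinning a user
 * buffer,
 *
 *	mlx5_ib_cont_pages(umem, virt_addr, &npages, &page_shift,
 *			   &ncont, &order);
 *
 * reports the region as ncont blocks of (1 << page_shift) bytes each,
 * covering npages PAGE_SIZE pages in total.
 */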

/* @umem: umem object to scan
 * @addr: ib virtual address requested by the user
 * @count: number of PAGE_SIZE pages covered by umem
 * @shift: page shift for the compound pages found in the region
 * @ncont: number of compound pages
 * @order: log2 of the number of compound pages
 */
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order)
{
	unsigned long tmp;
	unsigned long m;
	int i, k;
	u64 base = 0;
	int p = 0;
	int skip;
	int mask;
	u64 len;
	u64 pfn;
	int entry;
	struct scatterlist *sg;
	unsigned long page_shift = ilog2(umem->page_size);

	/* With ODP we must always match OS page size. */
	if (umem->odp_data) {
		*count = ib_umem_page_count(umem);
		*shift = PAGE_SHIFT;
		*ncont = *count;
		if (order)
			*order = ilog2(roundup_pow_of_two(*count));

		return;
	}

	addr = addr >> page_shift;
	tmp = (unsigned long)addr;
	m = find_first_bit(&tmp, BITS_PER_LONG);
	skip = 1 << m;
	mask = skip - 1;
	i = 0;
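
	/*
	 * Scan the SG list: 'm' tracks the log2 of the largest
	 * power-of-two block (in units of umem pages) consistent with
	 * the VA alignment and with every physically contiguous run
	 * seen so far; 'base' and 'p' track the current run.
	 */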
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> page_shift;
		pfn = sg_dma_address(sg) >> page_shift;
		for (k = 0; k < len; k++) {
			if (!(i & mask)) {
				/* First page of a block: the pfn's
				 * alignment bounds 'm'.
				 */
				tmp = (unsigned long)pfn;
				m = min_t(unsigned long, m,
					  find_first_bit(&tmp, BITS_PER_LONG));
				skip = 1 << m;
				mask = skip - 1;
				base = pfn;
				p = 0;
			} else {
				if (base + p != pfn) {
					/* Contiguity broken: restart the
					 * run at this pfn.
					 */
					tmp = (unsigned long)p;
					m = find_first_bit(&tmp, BITS_PER_LONG);
					skip = 1 << m;
					mask = skip - 1;
					base = pfn;
					p = 0;
				}
			}
			p++;
			i++;
		}
	}

	if (i) {
		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);

		if (order)
			*order = ilog2(roundup_pow_of_two(i) >> m);

		*ncont = DIV_ROUND_UP(i, (1 << m));
	} else {
		m = 0;

		if (order)
			*order = 0;

		*ncont = 0;
	}
	*shift = page_shift + m;
	*count = i;
}
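
/*
 * An ODP umem keeps per-page DMA addresses in odp_data->dma_list with
 * access permissions encoded in the low bits of each entry; fold those
 * bits into the MTT read/write flags the device expects.
 */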
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
{
	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;

	if (umem_dma & ODP_READ_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_READ;
	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_WRITE;

	return mtt_entry;
}
#endif

/*
 * Populate the given array with bus addresses from the umem.
 *
 * dev - mlx5_ib device
 * umem - umem to use to fill the pages
 * page_shift - determines the page size used in the resulting array
 * offset - offset into the umem to start from,
 *	    only implemented for ODP umems
 * num_pages - total number of pages to fill
 * pas - bus addresses array to fill
 * access_flags - access flags to set on all present pages;
 *		  use enum mlx5_ib_mtt_access_flags for this.
 */
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags)
{
	unsigned long umem_page_shift = ilog2(umem->page_size);
	int shift = page_shift - umem_page_shift;
	int mask = (1 << shift) - 1;
	int i, k;
	u64 cur = 0;
	u64 base;
	int len;
	int entry;
	struct scatterlist *sg;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	const bool odp = umem->odp_data != NULL;

	if (odp) {
		WARN_ON(shift != 0);
		WARN_ON(access_flags !=
			(MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));

		for (i = 0; i < num_pages; ++i) {
			dma_addr_t pa = umem->odp_data->dma_list[offset + i];

			pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
		}
		return;
	}
#endif
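
	/*
	 * Non-ODP path: walk the SG list and write one big-endian MTT
	 * entry per (1 << page_shift) block, OR-ing the caller's access
	 * flags into the low bits of each bus address.
	 */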
	i = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> umem_page_shift;
		base = sg_dma_address(sg);
		for (k = 0; k < len; k++) {
			if (!(i & mask)) {
				cur = base + (k << umem_page_shift);
				cur |= access_flags;

				pas[i >> shift] = cpu_to_be64(cur);
				mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
					    i >> shift,
					    (long long)be64_to_cpu(pas[i >> shift]));
			} else
				mlx5_ib_dbg(dev, "=====> 0x%llx\n",
					    (long long)(base + (k << umem_page_shift)));
			i++;
		}
	}
}

void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags)
{
	return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
				      ib_umem_num_pages(umem), pas,
				      access_flags);
}
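
/*
 * The device takes buffer offsets in units of 1/64 of a page
 * (page_size >> 6); fail if the offset of 'addr' within its page is
 * not a multiple of that unit, else report it in those units.
 */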
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
{
	u64 page_size;
	u64 page_mask;
	u64 off_size;
	u64 off_mask;
	u64 buf_off;

	page_size = (u64)1 << page_shift;
	page_mask = page_size - 1;
	buf_off = addr & page_mask;
	off_size = page_size >> 6;
	off_mask = off_size - 1;

	if (buf_off & off_mask)
		return -EINVAL;

	*offset = buf_off >> ilog2(off_size);

	return 0;
}