/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <dev/mlx5/driver.h>

#include "mlx5_core.h"

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

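/*
 * Illustrative sketch only (not compiled, not part of this driver): how a
 * hypothetical caller might use the buffer helpers below.  The helper name
 * my_create_queue_buf() and the choice of PAGE_SIZE as max_direct are
 * assumptions for the example; the real users live elsewhere in the mlx5
 * driver.
 */
#if 0
static int
my_create_queue_buf(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
{
        __be64 *pas;
        int err;

        /* Sizes above one page fall back to a page list. */
        err = mlx5_buf_alloc(dev, size, PAGE_SIZE, buf);
        if (err)
                return err;

        /* One big-endian DMA address per page for the firmware. */
        pas = kcalloc(buf->npages, sizeof(*pas), GFP_KERNEL);
        if (!pas) {
                mlx5_buf_free(dev, buf);
                return -ENOMEM;
        }
        mlx5_fill_page_array(buf, pas);

        /* ... hand 'pas' to the firmware command describing the buffer ... */

        kfree(pas);
        return 0;
}
#endif
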
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
                                           size_t size, dma_addr_t *dma_handle,
                                           int node)
{
        void *cpu_handle;

        cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
                                         dma_handle, GFP_KERNEL);
        return cpu_handle;
}

int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, int max_direct,
                        struct mlx5_buf *buf, int node)
{
        dma_addr_t t;

        buf->size = size;
        if (size <= max_direct) {
                buf->nbufs        = 1;
                buf->npages       = 1;
                buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
                buf->direct.buf   = mlx5_dma_zalloc_coherent_node(dev, size,
                                                                  &t, node);
                if (!buf->direct.buf)
                        return -ENOMEM;

                buf->direct.map = t;

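                /*
                 * Shrink the reported page size until the DMA address is
                 * aligned to it; npages doubles at each step so that
                 * npages << page_shift still covers the buffer.  For
                 * example, a 16KB buffer whose DMA address is only
                 * 4KB-aligned ends up described as four 4KB pages.
                 */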
                while (t & ((1 << buf->page_shift) - 1)) {
                        --buf->page_shift;
                        buf->npages *= 2;
                }
        } else {
                int i;

                buf->direct.buf  = NULL;
                buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                buf->npages      = buf->nbufs;
                buf->page_shift  = PAGE_SHIFT;
                buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
                                           GFP_KERNEL);

                for (i = 0; i < buf->nbufs; i++) {
                        buf->page_list[i].buf =
                                mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
                                                              &t, node);

                        buf->page_list[i].map = t;
                }

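                /*
                 * On 64-bit kernels, also map the individual pages into one
                 * contiguous kernel virtual range so that the fragmented
                 * buffer can still be addressed linearly through direct.buf;
                 * the first page is mapped a second time after the last one.
                 */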
                if (BITS_PER_LONG == 64) {
                        struct page **pages;

                        pages = kmalloc(sizeof(*pages) * (buf->nbufs + 1),
                                        GFP_KERNEL);
                        for (i = 0; i < buf->nbufs; i++)
                                pages[i] = virt_to_page(buf->page_list[i].buf);
                        pages[buf->nbufs] = pages[0];
                        buf->direct.buf = vmap(pages, buf->nbufs + 1, VM_MAP,
                                               PAGE_KERNEL);
                        kfree(pages);
                        if (!buf->direct.buf)
                                goto err_free;
                }
        }

        return 0;

err_free:
        mlx5_buf_free(dev, buf);

        return -ENOMEM;
}

int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
                   struct mlx5_buf *buf)
{
        return mlx5_buf_alloc_node(dev, size, max_direct,
                                   buf, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);


void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
        if (buf->nbufs == 1)
                dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
                                  buf->direct.map);
        else {
                int i;
                if (BITS_PER_LONG == 64 && buf->direct.buf)
                        vunmap(buf->direct.buf);

                for (i = 0; i < buf->nbufs; i++)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                  buf->page_list[i].buf,
                                                  buf->page_list[i].map);
                kfree(buf->page_list);
        }
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);

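/*
 * Doorbell records are allocated as cache-line-sized slots carved out of
 * dedicated DMA pages ("pgdirs").  Each pgdir tracks its free slots with a
 * bitmap, and all pgdirs of a device sit on priv.pgdir_list under
 * priv.pgdir_mutex.
 *
 * Illustrative sketch only (not compiled, not part of this driver): a
 * hypothetical consumer allocating a doorbell record for a queue and
 * releasing it on teardown.  struct my_queue and its fields are assumptions
 * for the example.
 */
#if 0
struct my_queue {
        struct mlx5_db db;
        /* ... */
};

static int
my_queue_init(struct mlx5_core_dev *dev, struct my_queue *q)
{
        int err;

        err = mlx5_db_alloc(dev, &q->db);
        if (err)
                return err;

        /* q->db.dma is handed to the firmware; q->db.db is written by us. */
        return 0;
}

static void
my_queue_fini(struct mlx5_core_dev *dev, struct my_queue *q)
{
        mlx5_db_free(dev, &q->db);
}
#endif
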
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
                                                 int node)
{
        struct mlx5_db_pgdir *pgdir;

        pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);

        bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);

        pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
                                                       &pgdir->db_dma, node);
        if (!pgdir->db_page) {
                kfree(pgdir);
                return NULL;
        }

        return pgdir;
}

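/*
 * Claim one free doorbell record from 'pgdir': find a free slot in the
 * bitmap, mark it used, and point 'db' at the corresponding
 * L1_CACHE_BYTES-sized slice of the page (both the CPU pointer and the DMA
 * address).  The two 32-bit doorbell words are cleared before use.
 */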
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
                                    struct mlx5_db *db)
{
        int offset;
        int i;

        i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
        if (i >= MLX5_DB_PER_PAGE)
                return -ENOMEM;

        __clear_bit(i, pgdir->bitmap);

        db->u.pgdir = pgdir;
        db->index   = i;
        offset = db->index * L1_CACHE_BYTES;
        db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
        db->dma     = pgdir->db_dma  + offset;

        db->db[0] = 0;
        db->db[1] = 0;

        return 0;
}

int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
        struct mlx5_db_pgdir *pgdir;
        int ret = 0;

        mutex_lock(&dev->priv.pgdir_mutex);

        list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
                if (!mlx5_alloc_db_from_pgdir(pgdir, db))
                        goto out;

        pgdir = mlx5_alloc_db_pgdir(dev, node);
        if (!pgdir) {
                ret = -ENOMEM;
                goto out;
        }

        list_add(&pgdir->list, &dev->priv.pgdir_list);

        /* This should never fail -- we just allocated an empty page: */
        WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
        mutex_unlock(&dev->priv.pgdir_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);

int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
        return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);

void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
        mutex_lock(&dev->priv.pgdir_mutex);

        __set_bit(db->index, db->u.pgdir->bitmap);

        if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
                dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
                list_del(&db->u.pgdir->list);
                kfree(db->u.pgdir);
        }

        mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);

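/*
 * Write the DMA address of every page backing 'buf' into the big-endian
 * 'pas' array, which the caller typically embeds in the firmware command
 * that describes the buffer.
 */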
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
        u64 addr;
        int i;

        for (i = 0; i < buf->npages; i++) {
                if (buf->nbufs == 1)
                        addr = buf->direct.map + ((u64)i << buf->page_shift);
                else
                        addr = buf->page_list[i].map;

                pas[i] = cpu_to_be64(addr);
        }
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);