/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <dev/mlx5/driver.h>
#include "mlx5_core.h"
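
/*
 * This compile-time assertion presumably verifies that the Linux KPI
 * definition of PAGE_MASK, ~(PAGE_SIZE - 1), is in scope: with the
 * native FreeBSD definition (PAGE_SIZE - 1) the comparison below
 * would be false.
 */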
CTASSERT((uintptr_t)PAGE_MASK > (uintptr_t)PAGE_SIZE);

struct mlx5_pages_req {
        struct mlx5_core_dev *dev;
        u16     func_id;
        s32     npages;
        struct work_struct work;
};

enum {
        MAX_RECLAIM_TIME_MSECS = 5000,
};
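
/*
 * Callback invoked by bus_dmamap_load() once the DMA mapping of a
 * firmware page is resolved. On success the single segment's bus
 * address is recorded in the page descriptor; either way the load
 * status is published and MLX5_DMA_DONE() signals the thread waiting
 * in mlx5_fwp_alloc(). The callback can run with the DMA lock already
 * held, hence the MLX5_DMA_OWNED() check.
 */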
static void
mlx5_fwp_load_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct mlx5_fw_page *fwp;
        uint8_t owned;

        fwp = (struct mlx5_fw_page *)arg;
        owned = MLX5_DMA_OWNED(fwp->dev);

        if (!owned)
                MLX5_DMA_LOCK(fwp->dev);

        if (error == 0) {
                KASSERT(nseg == 1, ("Number of segments is different from 1"));
                fwp->dma_addr = segs->ds_addr;
                fwp->load_done = MLX5_LOAD_ST_SUCCESS;
        } else {
                fwp->load_done = MLX5_LOAD_ST_FAILURE;
        }
        MLX5_DMA_DONE(fwp->dev);

        if (!owned)
                MLX5_DMA_UNLOCK(fwp->dev);
}
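
/*
 * Flush CPU caches for every page in the array so that pending CPU
 * writes become visible to the device (CPU -> device handover).
 */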
void
mlx5_fwp_flush(struct mlx5_fw_page *fwp)
{
        unsigned num = fwp->numpages;

        while (num--)
                bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_PREWRITE);
}
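
/*
 * Invalidate CPU caches for every page in the array before the CPU
 * reads data the device may have written (device -> CPU handover).
 */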
void
mlx5_fwp_invalidate(struct mlx5_fw_page *fwp)
{
        unsigned num = fwp->numpages;

        while (num--) {
                bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_POSTREAD);
                bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_PREREAD);
        }
}
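
/*
 * Allocate and DMA-map "num" firmware pages of MLX5_ADAPTER_PAGE_SIZE
 * bytes each. With num == 0 only the descriptor itself is allocated.
 * The function must be able to sleep, so M_NOWAIT requests are
 * rejected. On failure all previously mapped pages are unwound and
 * NULL is returned.
 */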
struct mlx5_fw_page *
mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags, unsigned num)
{
        struct mlx5_fw_page *fwp;
        unsigned x;
        int err;

        /* check for special case */
        if (num == 0) {
                fwp = kzalloc(sizeof(*fwp), flags);
                if (fwp != NULL)
                        fwp->dev = dev;
                return (fwp);
        }

        /* we need sleeping context for this function */
        if (flags & M_NOWAIT)
                return (NULL);

        fwp = kzalloc(sizeof(*fwp) * num, flags);

        /* serialize loading the DMA map(s) */
        sx_xlock(&dev->cmd.dma_sx);

        for (x = 0; x != num; x++) {
                /* store pointer to MLX5 core device */
                fwp[x].dev = dev;
                /* store number of pages left from the array */
                fwp[x].numpages = num - x;

                /* allocate memory */
                err = bus_dmamem_alloc(dev->cmd.dma_tag, &fwp[x].virt_addr,
                    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &fwp[x].dma_map);
                if (err != 0)
                        goto failure;

                /* load memory into DMA */
                MLX5_DMA_LOCK(dev);
                (void) bus_dmamap_load(
                    dev->cmd.dma_tag, fwp[x].dma_map, fwp[x].virt_addr,
                    MLX5_ADAPTER_PAGE_SIZE, &mlx5_fwp_load_mem_cb,
                    fwp + x, BUS_DMA_WAITOK | BUS_DMA_COHERENT);

                /* wait for the callback to publish the load status */
                while (fwp[x].load_done == MLX5_LOAD_ST_NONE)
                        MLX5_DMA_WAIT(dev);
                MLX5_DMA_UNLOCK(dev);

                /* check for error */
                if (fwp[x].load_done != MLX5_LOAD_ST_SUCCESS) {
                        bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr,
                            fwp[x].dma_map);
                        goto failure;
                }
        }
        sx_xunlock(&dev->cmd.dma_sx);
        return (fwp);

failure:
        while (x--) {
                bus_dmamap_unload(dev->cmd.dma_tag, fwp[x].dma_map);
                bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr, fwp[x].dma_map);
        }
        sx_xunlock(&dev->cmd.dma_sx);
        kfree(fwp);
        return (NULL);
}
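
/*
 * Release firmware pages allocated by mlx5_fwp_alloc(): unload and
 * free each DMA mapping in the array, then free the descriptor
 * itself. NULL-safe; a zero-page descriptor is simply freed.
 */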
void
mlx5_fwp_free(struct mlx5_fw_page *fwp)
{
        struct mlx5_core_dev *dev;
        unsigned num;

        /* be NULL safe */
        if (fwp == NULL)
                return;

        /* check for special case */
        if (fwp->numpages == 0) {
                kfree(fwp);
                return;
        }

        num = fwp->numpages;
        dev = fwp->dev;

        while (num--) {
                bus_dmamap_unload(dev->cmd.dma_tag, fwp[num].dma_map);
                bus_dmamem_free(dev->cmd.dma_tag, fwp[num].virt_addr, fwp[num].dma_map);
        }

        kfree(fwp);
}
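
/*
 * Translate a byte offset within a firmware page array into the bus
 * (DMA) address the device should use.
 */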
u64
mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset)
{
        size_t index = (offset / MLX5_ADAPTER_PAGE_SIZE);
        KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset));

        return ((fwp + index)->dma_addr + (offset % MLX5_ADAPTER_PAGE_SIZE));
}
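
/*
 * Translate a byte offset within a firmware page array into the
 * kernel virtual address for CPU access.
 */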
void *
mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset)
{
        size_t index = (offset / MLX5_ADAPTER_PAGE_SIZE);
        KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset));

        return ((char *)(fwp + index)->virt_addr + (offset % MLX5_ADAPTER_PAGE_SIZE));
}
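
/*
 * Insert a firmware page into the per-device red-black tree, keyed by
 * its DMA address. Must be called with the DMA lock held. Returns
 * -EEXIST if a page with the same address is already present.
 */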
static int
mlx5_insert_fw_page_locked(struct mlx5_core_dev *dev, struct mlx5_fw_page *nfp)
{
        struct rb_root *root = &dev->priv.page_root;
        struct rb_node **new = &root->rb_node;
        struct rb_node *parent = NULL;
        struct mlx5_fw_page *tfp;

        while (*new != NULL) {
                parent = *new;
                tfp = rb_entry(parent, struct mlx5_fw_page, rb_node);
                if (tfp->dma_addr < nfp->dma_addr)
                        new = &parent->rb_left;
                else if (tfp->dma_addr > nfp->dma_addr)
                        new = &parent->rb_right;
                else
                        return (-EEXIST);
        }

        rb_link_node(&nfp->rb_node, parent, new);
        rb_insert_color(&nfp->rb_node, root);
        return (0);
}
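
/*
 * Look up a firmware page by DMA address and, if found, unlink it
 * from the tree and return it. Must be called with the DMA lock held.
 * The comparison direction matches mlx5_insert_fw_page_locked() above.
 */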
static struct mlx5_fw_page *
mlx5_remove_fw_page_locked(struct mlx5_core_dev *dev, bus_addr_t addr)
{
        struct rb_root *root = &dev->priv.page_root;
        struct rb_node *tmp = root->rb_node;
        struct mlx5_fw_page *result = NULL;
        struct mlx5_fw_page *tfp;

        while (tmp != NULL) {
                tfp = rb_entry(tmp, struct mlx5_fw_page, rb_node);
                if (tfp->dma_addr < addr) {
                        tmp = tmp->rb_left;
                } else if (tfp->dma_addr > addr) {
                        tmp = tmp->rb_right;
                } else {
                        rb_erase(&tfp->rb_node, &dev->priv.page_root);
                        result = tfp;
                        break;
                }
        }
        return (result);
}
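
/*
 * Allocate a single 4K firmware page on behalf of "func_id", register
 * it in the page tree and hand its DMA address back to the caller.
 */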
static int
alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
{
        struct mlx5_fw_page *fwp;
        int err;

        fwp = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
        if (fwp == NULL)
                return (-ENOMEM);

        fwp->func_id = func_id;

        MLX5_DMA_LOCK(dev);
        err = mlx5_insert_fw_page_locked(dev, fwp);
        MLX5_DMA_UNLOCK(dev);

        if (err != 0) {
                mlx5_fwp_free(fwp);
        } else {
                /* make sure cached data is cleaned */
                mlx5_fwp_invalidate(fwp);

                /* store DMA address */
                *addr = fwp->dma_addr;
        }
        return (err);
}
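
/*
 * Return a single 4K firmware page: remove it from the page tree by
 * DMA address and release its memory. Warns if the address is
 * unknown.
 */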
static void
free_4k(struct mlx5_core_dev *dev, u64 addr)
{
        struct mlx5_fw_page *fwp;

        MLX5_DMA_LOCK(dev);
        fwp = mlx5_remove_fw_page_locked(dev, addr);
        MLX5_DMA_UNLOCK(dev);

        if (fwp == NULL) {
                mlx5_core_warn(dev, "Cannot free 4K page at 0x%llx\n", (long long)addr);
                return;
        }
        mlx5_fwp_free(fwp);
}
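
/*
 * Execute the QUERY_PAGES command: firmware reports how many pages it
 * requires for the given stage (boot or init) and which function ID
 * the request is for.
 */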
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
        u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
        int err;

        MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
        MLX5_SET(query_pages_in, in, op_mod, boot ?
                 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
                 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (err)
                return err;

        *npages = MLX5_GET(query_pages_out, out, num_pages);
        *func_id = MLX5_GET(query_pages_out, out, function_id);

        return 0;
}
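
/*
 * Allocate "npages" 4K pages and post them to firmware with the
 * MANAGE_PAGES (GIVE) command. If allocation or the command fails and
 * "notify_fail" is set, firmware is informed via MLX5_PAGES_CANT_GIVE,
 * and any pages already allocated are released.
 */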
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
        int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
        u32 *in, *nin;
        u64 addr;
        int err;
        int i = 0;

        inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
        in = mlx5_vzalloc(inlen);
        if (!in) {
                mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
                err = -ENOMEM;
                goto out_alloc;
        }

        for (i = 0; i < npages; i++) {
                err = alloc_4k(dev, &addr, func_id);
                if (err)
                        goto out_alloc;
                MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
        }

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
                               func_id, npages, err);
                goto out_alloc;
        }
        dev->priv.fw_pages += npages;
        dev->priv.pages_per_func[func_id] += npages;

        mlx5_core_dbg(dev, "err %d\n", err);

        goto out_free;

out_alloc:
        if (notify_fail) {
                nin = mlx5_vzalloc(inlen);
                if (!nin)
                        goto out_4k;

                memset(&out, 0, sizeof(out));
                MLX5_SET(manage_pages_in, nin, opcode, MLX5_CMD_OP_MANAGE_PAGES);
                MLX5_SET(manage_pages_in, nin, op_mod, MLX5_PAGES_CANT_GIVE);
                MLX5_SET(manage_pages_in, nin, function_id, func_id);
                if (mlx5_cmd_exec(dev, nin, inlen, out, sizeof(out)))
                        mlx5_core_warn(dev, "page notify failed\n");
                kvfree(nin);
        }

out_4k:
        for (i--; i >= 0; i--)
                free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
        kvfree(in);
        return err;
}
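
/*
 * Wrapper around mlx5_cmd_exec() for page reclaim. If the device is
 * in internal-error state the command interface is unusable, so
 * instead of talking to firmware the reclaim is simulated: up to
 * "npages" entries belonging to "func_id" are copied straight out of
 * the page tree into the output mailbox.
 */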
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
        struct mlx5_fw_page *fwp;
        struct rb_node *p;
        u32 func_id;
        u32 npages;
        u32 i = 0;

        if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
                return mlx5_cmd_exec(dev, in, in_size, out, out_size);

        /* No hard feelings, we want our pages back! */
        npages = MLX5_GET(manage_pages_in, in, input_num_entries);
        func_id = MLX5_GET(manage_pages_in, in, function_id);

        p = rb_first(&dev->priv.page_root);
        while (p && i < npages) {
                fwp = rb_entry(p, struct mlx5_fw_page, rb_node);
                p = rb_next(p);
                if (fwp->func_id != func_id)
                        continue;

                MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->dma_addr);
                i++;
        }

        MLX5_SET(manage_pages_out, out, output_num_entries, i);
        return 0;
}
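
/*
 * Ask firmware to return up to "npages" pages for "func_id" via
 * MANAGE_PAGES (TAKE), free every page actually returned and update
 * the accounting counters. The number of reclaimed pages is reported
 * through "nclaimed" when non-NULL.
 */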
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 s32 *nclaimed)
{
        int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
        int num_claimed;
        u32 *out;
        int err;
        int i;

        if (nclaimed)
                *nclaimed = 0;

        outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);

        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
        err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
        if (err) {
                mlx5_core_err(dev, "failed reclaiming pages\n");
                goto out_free;
        }

        num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
        if (nclaimed)
                *nclaimed = num_claimed;

        dev->priv.fw_pages -= num_claimed;
        dev->priv.pages_per_func[func_id] -= num_claimed;
        for (i = 0; i < num_claimed; i++)
                free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));

out_free:
        kvfree(out);
        return err;
}
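
/*
 * Workqueue handler for firmware page requests: a negative page count
 * triggers a reclaim, a positive one a give.
 */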
static void pages_work_handler(struct work_struct *work)
{
        struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
        struct mlx5_core_dev *dev = req->dev;
        int err = 0;

        if (req->npages < 0)
                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
        else if (req->npages > 0)
                err = give_pages(dev, req->func_id, req->npages, 1);

        if (err)
                mlx5_core_warn(dev, "%s fail %d\n",
                               req->npages < 0 ? "reclaim" : "give", err);

        kfree(req);
}
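
/*
 * Entry point for firmware page-request events. This is presumably
 * reached from event-handler context, hence the GFP_ATOMIC allocation
 * and the deferral of the actual work to the page workqueue.
 */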
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
        struct mlx5_pages_req *req;

        req = kzalloc(sizeof(*req), GFP_ATOMIC);
        if (!req) {
                mlx5_core_warn(dev, "failed to allocate pages request\n");
                return;
        }

        req->dev = dev;
        req->func_id = func_id;
        req->npages = npages;
        INIT_WORK(&req->work, pages_work_handler);
        if (!queue_work(dev->priv.pg_wq, &req->work))
                mlx5_core_warn(dev, "failed to queue pages handler work\n");
}
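
/*
 * Query how many pages firmware needs for the boot or init stage and
 * hand them over in one call; used during device bring-up. The zero
 * "notify_fail" argument means no CANT_GIVE notification is sent on
 * failure here.
 */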
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
        u16 uninitialized_var(func_id);
        s32 uninitialized_var(npages);
        int err;

        err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
        if (err)
                return err;

        mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
                      npages, boot ? "boot" : "init", func_id);

        return give_pages(dev, func_id, npages, 0);
}

enum {
        MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};
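
/*
 * Wait up to MAX_RECLAIM_TIME_MSECS for firmware to return all pages
 * held on behalf of VFs (everything except the PF's own pages in
 * pages_per_func[0]). The deadline is extended while progress is
 * being made. Returns the negated number of pages still outstanding.
 */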
s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev)
{
        int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
        s64 prevpages = 0;
        s64 npages = 0;

        while (!time_after(jiffies, end)) {
                /* exclude own function, VFs only */
                npages = dev->priv.fw_pages - dev->priv.pages_per_func[0];
                if (!npages)
                        break;

                if (npages != prevpages)
                        end = end + msecs_to_jiffies(100);

                prevpages = npages;
                msleep(1);
        }

        if (npages)
                mlx5_core_warn(dev, "FW did not return all VF pages, this will leak memory\n");

        return -npages;
}
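
/*
 * Compute how many page addresses fit in a single reclaim command
 * response: the immediate output area of the command layout plus
 * MLX5_BLKS_FOR_RECLAIM_PAGES protection blocks, minus the fixed
 * MANAGE_PAGES output header, divided by the size of one address
 * entry.
 */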
static int optimal_reclaimed_pages(void)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_layout *lay;
        int ret;

        ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
               MLX5_ST_SZ_BYTES(manage_pages_out)) /
               MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

        return ret;
}
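
/*
 * Drain the whole page tree back to firmware at teardown, reclaiming
 * in chunks of optimal_reclaimed_pages(). The deadline is refreshed
 * whenever a chunk makes progress; if the timeout expires with pages
 * still outstanding we give up with a warning rather than block
 * forever.
 */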
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
        int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
        struct mlx5_fw_page *fwp;
        struct rb_node *p;
        int nclaimed = 0;
        int err;

        do {
                p = rb_first(&dev->priv.page_root);
                if (p) {
                        fwp = rb_entry(p, struct mlx5_fw_page, rb_node);
                        err = reclaim_pages(dev, fwp->func_id,
                                            optimal_reclaimed_pages(),
                                            &nclaimed);
                        if (err) {
                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
                                               err);
                                return err;
                        }

                        if (nclaimed)
                                end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
                }
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
                        break;
                }
        } while (p);

        return 0;
}
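
/*
 * Lifecycle helpers: init/cleanup set up and tear down the page tree,
 * while start/stop manage the single-threaded workqueue that
 * serializes firmware page requests.
 */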
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{

        dev->priv.page_root = RB_ROOT;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
        dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
        if (!dev->priv.pg_wq)
                return -ENOMEM;

        return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
        destroy_workqueue(dev->priv.pg_wq);
}