/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <dev/mlx5/driver.h>
#include "mlx5_core.h"
struct mlx5_pages_req {
    struct mlx5_core_dev *dev;
    u16 func_id;
    s32 npages;
    struct work_struct work;
};
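
/*
 * One host page handed to firmware.  Pages are tracked in the
 * priv.page_root rb-tree keyed by DMA address; the bitmask records which
 * MLX5_ADAPTER_PAGE_SIZE (4K) chunks of the page are still free.
 */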
struct fw_page {
    struct rb_node rb_node;
    u64 addr;
    struct page *page;
    u16 func_id;
    unsigned long bitmask;
    struct list_head list;
    unsigned free_count;
};
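
/* Mailbox layouts for the MANAGE_PAGES command (give/take page lists). */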
struct mlx5_manage_pages_inbox {
    struct mlx5_inbox_hdr hdr;
    __be16 rsvd;
    __be16 func_id;
    __be32 num_entries;
    __be64 pas[0];
};

struct mlx5_manage_pages_outbox {
    struct mlx5_outbox_hdr hdr;
    __be32 num_entries;
    u8 rsvd[4];
    __be64 pas[0];
};

enum {
    MAX_RECLAIM_TIME_MSECS = 5000,
};

enum {
    MLX5_MAX_RECLAIM_TIME_MILI = 5000,
    MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
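
/*
 * Track a newly allocated host page in the rb-tree and put it on the
 * free list with all of its 4K chunks marked available.
 */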
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
    struct rb_root *root = &dev->priv.page_root;
    struct rb_node **new = &root->rb_node;
    struct rb_node *parent = NULL;
    struct fw_page *nfp;
    struct fw_page *tfp;
    int i;

    while (*new) {
        parent = *new;
        tfp = rb_entry(parent, struct fw_page, rb_node);
        if (tfp->addr < addr)
            new = &parent->rb_left;
        else if (tfp->addr > addr)
            new = &parent->rb_right;
        else
            return -EEXIST;
    }

    nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
    if (!nfp)
        return -ENOMEM;

    nfp->addr = addr;
    nfp->page = page;
    nfp->func_id = func_id;
    nfp->free_count = MLX5_NUM_4K_IN_PAGE;
    for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
        set_bit(i, &nfp->bitmask);

    rb_link_node(&nfp->rb_node, parent, new);
    rb_insert_color(&nfp->rb_node, root);
    list_add(&nfp->list, &dev->priv.free_list);

    return 0;
}
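
/* Look up a tracked page by its DMA address. */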
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
    struct rb_root *root = &dev->priv.page_root;
    struct rb_node *tmp = root->rb_node;
    struct fw_page *result = NULL;
    struct fw_page *tfp;

    while (tmp) {
        tfp = rb_entry(tmp, struct fw_page, rb_node);
        if (tfp->addr < addr) {
            tmp = tmp->rb_left;
        } else if (tfp->addr > addr) {
            tmp = tmp->rb_right;
        } else {
            result = tfp;
            break;
        }
    }

    return result;
}
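
/* Query how many pages the firmware needs (boot or init phase). */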
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                                s32 *npages, int boot)
{
    u32 in[MLX5_ST_SZ_DW(query_pages_in)];
    u32 out[MLX5_ST_SZ_DW(query_pages_out)];
    int err;

    memset(in, 0, sizeof(in));

    MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
    MLX5_SET(query_pages_in, in, op_mod,
             boot ? MLX5_BOOT_PAGES : MLX5_INIT_PAGES);

    memset(out, 0, sizeof(out));
    err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
    if (err)
        return err;

    *npages = MLX5_GET(query_pages_out, out, num_pages);
    *func_id = MLX5_GET(query_pages_out, out, function_id);

    return 0;
}
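
/*
 * Hand out one free 4K chunk from the first page on the free list,
 * removing the page from the list once all of its chunks are in use.
 */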
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
    struct fw_page *fp;
    unsigned n;

    if (list_empty(&dev->priv.free_list))
        return -ENOMEM;

    fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
    n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
    if (n >= MLX5_NUM_4K_IN_PAGE) {
        mlx5_core_warn(dev, "alloc 4k bug\n");
        return -ENOENT;
    }
    clear_bit(n, &fp->bitmask);
    fp->free_count--;
    if (!fp->free_count)
        list_del(&fp->list);

    *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

    return 0;
}
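
/*
 * Return a 4K chunk to its owning page; unmap and free the page entirely
 * once every chunk in it is free again.
 */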
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
    struct fw_page *fwp;
    int n;

    fwp = find_fw_page(dev, addr & PAGE_MASK);
    if (!fwp) {
        mlx5_core_warn(dev, "page not found\n");
        return;
    }

    n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
    fwp->free_count++;
    set_bit(n, &fwp->bitmask);
    if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
        rb_erase(&fwp->rb_node, &dev->priv.page_root);
        if (fwp->free_count != 1)
            list_del(&fwp->list);
        dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
                       DMA_BIDIRECTIONAL);
        __free_page(fwp->page);
        kfree(fwp);
    } else if (fwp->free_count == 1) {
        list_add(&fwp->list, &dev->priv.free_list);
    }
}
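
/* Allocate and DMA-map a fresh host page and start tracking it. */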
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
    struct page *page;
    u64 addr;
    int err;

    page = alloc_page(GFP_HIGHUSER);
    if (!page) {
        mlx5_core_warn(dev, "failed to allocate page\n");
        return -ENOMEM;
    }
    addr = dma_map_page(&dev->pdev->dev, page, 0,
                        PAGE_SIZE, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(&dev->pdev->dev, addr)) {
        mlx5_core_warn(dev, "failed dma mapping page\n");
        err = -ENOMEM;
        goto out_alloc;
    }
    err = insert_page(dev, addr, page, func_id);
    if (err) {
        mlx5_core_err(dev, "failed to track allocated page\n");
        goto out_mapping;
    }

    return 0;

out_mapping:
    dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

out_alloc:
    __free_page(page);

    return err;
}
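
/*
 * Give npages 4K chunks to firmware via MANAGE_PAGES.  On failure the
 * chunks are returned to the allocator and, if notify_fail is set, the
 * firmware is told that the request could not be satisfied.
 */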
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                      int notify_fail)
{
    struct mlx5_manage_pages_inbox *in;
    struct mlx5_manage_pages_outbox out;
    struct mlx5_manage_pages_inbox *nin;
    int inlen;
    u64 addr;
    int err;
    int i;

    inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
    in = mlx5_vzalloc(inlen);
    if (!in) {
        mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
        return -ENOMEM;
    }
    memset(&out, 0, sizeof(out));

    for (i = 0; i < npages; i++) {
retry:
        err = alloc_4k(dev, &addr);
        if (err) {
            if (err == -ENOMEM)
                err = alloc_system_page(dev, func_id);
            if (err)
                goto out_alloc;

            goto retry;
        }
        in->pas[i] = cpu_to_be64(addr);
    }

    in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
    in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
    in->func_id = cpu_to_be16(func_id);
    in->num_entries = cpu_to_be32(npages);
    err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
    if (err) {
        mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
                       func_id, npages, err);
        goto out_alloc;
    }
    dev->priv.fw_pages += npages;

    if (out.hdr.status) {
        err = mlx5_cmd_status_to_err(&out.hdr);
        if (err) {
            mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
                           func_id, npages, out.hdr.status);
            goto out_alloc;
        }
    }

    mlx5_core_dbg(dev, "err %d\n", err);

    goto out_free;

out_alloc:
    if (notify_fail) {
        /* tell firmware the pages could not be supplied */
        nin = kzalloc(sizeof(*nin), GFP_KERNEL);
        memset(&out, 0, sizeof(out));
        if (nin) {
            nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
            nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
            nin->func_id = cpu_to_be16(func_id);
            if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
                mlx5_core_warn(dev, "page notify failed\n");
            kfree(nin);
        }
    }
    for (i--; i >= 0; i--)
        free_4k(dev, be64_to_cpu(in->pas[i]));

out_free:
    mlx5_vfree(in);
    return err;
}
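
/*
 * Ask firmware to return up to npages 4K chunks and free whatever it
 * actually hands back.
 */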
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                         int *nclaimed)
{
    struct mlx5_manage_pages_inbox in;
    struct mlx5_manage_pages_outbox *out;
    int num_claimed;
    int outlen;
    u64 addr;
    int err;
    int i;

    if (nclaimed)
        *nclaimed = 0;

    memset(&in, 0, sizeof(in));
    outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
    out = mlx5_vzalloc(outlen);
    if (!out)
        return -ENOMEM;

    in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
    in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
    in.func_id = cpu_to_be16(func_id);
    in.num_entries = cpu_to_be32(npages);
    mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
    err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
    if (err) {
        mlx5_core_err(dev, "failed reclaiming pages\n");
        goto out_free;
    }

    if (out->hdr.status) {
        err = mlx5_cmd_status_to_err(&out->hdr);
        goto out_free;
    }

    num_claimed = be32_to_cpu(out->num_entries);
    if (nclaimed)
        *nclaimed = num_claimed;

    dev->priv.fw_pages -= num_claimed;

    for (i = 0; i < num_claimed; i++) {
        addr = be64_to_cpu(out->pas[i]);
        free_4k(dev, addr);
    }

out_free:
    mlx5_vfree(out);
    return err;
}
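
/* Workqueue handler: negative npages means reclaim, positive means give. */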
static void pages_work_handler(struct work_struct *work)
{
    struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
    struct mlx5_core_dev *dev = req->dev;
    int err = 0;

    if (req->npages < 0)
        err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
    else if (req->npages > 0)
        err = give_pages(dev, req->func_id, req->npages, 1);

    if (err)
        mlx5_core_warn(dev, "%s fail %d\n",
                       req->npages < 0 ? "reclaim" : "give", err);

    kfree(req);
}
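
/*
 * Queue a deferred give/reclaim request on behalf of firmware; callable
 * from atomic context (GFP_ATOMIC), the real work runs in pg_wq.
 */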
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
                                 s32 npages)
{
    struct mlx5_pages_req *req;

    req = kzalloc(sizeof(*req), GFP_ATOMIC);
    if (!req) {
        mlx5_core_warn(dev, "failed to allocate pages request\n");
        return;
    }

    req->dev = dev;
    req->func_id = func_id;
    req->npages = npages;
    INIT_WORK(&req->work, pages_work_handler);
    if (!queue_work(dev->priv.pg_wq, &req->work))
        mlx5_core_warn(dev, "failed to queue pages handler work\n");
}
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
    u16 uninitialized_var(func_id);
    s32 uninitialized_var(npages);
    int err;

    err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
    if (err)
        return err;

    mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
                  npages, boot ? "boot" : "init", func_id);

    return give_pages(dev, func_id, npages, 0);
}
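
/*
 * Number of page addresses that fit in one reclaim command output
 * mailbox: the immediate output plus MLX5_BLKS_FOR_RECLAIM_PAGES blocks.
 */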
enum {
    MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

static int optimal_reclaimed_pages(void)
{
    struct mlx5_cmd_prot_block *block;
    struct mlx5_cmd_layout *lay;
    int ret;

    ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
           sizeof(struct mlx5_manage_pages_outbox)) /
           FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);

    return ret;
}
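
/*
 * Reclaim all pages from firmware at teardown, giving up if no progress
 * is made for MAX_RECLAIM_TIME_MSECS.
 */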
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
    int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
    struct fw_page *fwp;
    struct rb_node *p;
    int nclaimed = 0;
    int err;

    do {
        p = rb_first(&dev->priv.page_root);
        if (p) {
            fwp = rb_entry(p, struct fw_page, rb_node);
            err = reclaim_pages(dev, fwp->func_id,
                                optimal_reclaimed_pages(),
                                &nclaimed);
            if (err) {
                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
                               err);
                return err;
            }
            if (nclaimed)
                end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
        }
        if (time_after(jiffies, end)) {
            mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
            break;
        }
    } while (p);

    return 0;
}
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
    dev->priv.page_root = RB_ROOT;
    INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
    /* pages are reclaimed by mlx5_reclaim_startup_pages(); nothing to do */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
    dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
    if (!dev->priv.pg_wq)
        return -ENOMEM;

    return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
    destroy_workqueue(dev->priv.pg_wq);
}