/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <dev/mlx5/driver.h>

#include "mlx5_core.h"

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
					   size_t size, dma_addr_t *dma_handle,
					   int node)
{
	void *cpu_handle;

	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, dma_handle,
					 GFP_KERNEL);
	return cpu_handle;
}

int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, int max_direct,
			struct mlx5_buf *buf, int node)
{
	dma_addr_t t;

	buf->size = size;
	if (size <= max_direct) {
		buf->nbufs	= 1;
		buf->npages	= 1;
		buf->page_shift	= (u8)get_order(size) + PAGE_SHIFT;
		buf->direct.buf	= mlx5_dma_zalloc_coherent_node(dev, size,
								&t, node);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		/* Shrink page_shift until the DMA address is aligned to it;
		 * npages doubles each step so npages << page_shift still
		 * covers the whole allocation. */
		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}
	} else {
		int i;

		buf->direct.buf	= NULL;
		buf->nbufs	= (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages	= buf->nbufs;
		buf->page_shift	= PAGE_SHIFT;
		buf->page_list	= kcalloc(buf->nbufs, sizeof(*buf->page_list),
					  GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; i++) {
			buf->page_list[i].buf =
				mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
							      &t, node);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;

			pages = kmalloc(sizeof(*pages) * (buf->nbufs + 1),
					GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; i++)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			/* Map the first page once more after the last one, so
			 * the virtually-contiguous alias wraps around and an
			 * entry straddling the end of the buffer can be read
			 * linearly. */
			pages[buf->nbufs] = pages[0];
			buf->direct.buf = vmap(pages, buf->nbufs + 1, VM_MAP,
					       PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx5_buf_free(dev, buf);

	return -ENOMEM;
}

int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
		   struct mlx5_buf *buf)
{
	return mlx5_buf_alloc_node(dev, size, max_direct,
				   buf, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);

void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
	if (buf->nbufs == 1)
		dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
				  buf->direct.map);
	else {
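		/*
		 * Multi-page case: undo the 64-bit vmap() alias first, then
		 * release each coherent page and the page list itself.
		 */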
		int i;

		if (BITS_PER_LONG == 64 && buf->direct.buf)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; i++)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);

static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
						 int node)
{
	struct mlx5_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);

	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
						       &pgdir->db_dma, node);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
				    struct mlx5_db *db)
{
	int offset;
	int i;

	i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
	if (i >= MLX5_DB_PER_PAGE)
		return -ENOMEM;

	__clear_bit(i, pgdir->bitmap);

	db->u.pgdir = pgdir;
	db->index   = i;
	/* Each doorbell record gets its own cache line, avoiding false
	 * sharing between doorbells of unrelated queues. */
	offset = db->index * L1_CACHE_BYTES;
	db->db	    = pgdir->db_page + offset / sizeof(*pgdir->db_page);
	db->dma	    = pgdir->db_dma  + offset;

	db->db[0] = 0;
	db->db[1] = 0;

	return 0;
}

int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->priv.pgdir_mutex);

	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(dev, node);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->priv.pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&dev->priv.pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);

int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);

void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	mutex_lock(&dev->priv.pgdir_mutex);

	__set_bit(db->index, db->u.pgdir->bitmap);

	/* Free the backing page once every doorbell in it is returned. */
	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);

void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
	u64 addr;
	int i;

	for (i = 0; i < buf->npages; i++) {
		if (buf->nbufs == 1)
			addr = buf->direct.map + ((u64)i << buf->page_shift);
		else
			addr = buf->page_list[i].map;
		pas[i] = cpu_to_be64(addr);
	}
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
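/*
 * Illustrative usage sketch (not part of the driver): a consumer creating a
 * hardware queue would pair these helpers roughly as below.  The sizes,
 * "wqe_count"/"wqe_size", and the "pas" destination are assumptions for
 * illustration; in practice "pas" points at the page-address array inside a
 * firmware command mailbox (e.g. of a create-CQ or create-QP command).
 *
 *	struct mlx5_buf buf;
 *	struct mlx5_db db;
 *	int err;
 *
 *	err = mlx5_buf_alloc(dev, wqe_count * wqe_size, 2 * PAGE_SIZE, &buf);
 *	if (err)
 *		return err;
 *	err = mlx5_db_alloc(dev, &db);
 *	if (err) {
 *		mlx5_buf_free(dev, &buf);
 *		return err;
 *	}
 *	mlx5_fill_page_array(&buf, pas);   // big-endian DMA page addresses
 *	...
 *	mlx5_db_free(dev, &db);
 *	mlx5_buf_free(dev, &buf);
 */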