/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		--bitmap->avail;

	spin_unlock(&bitmap->lock);

	return obj;
}
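
/*
 * Illustrative note (added commentary, not in the original source):
 * ->top implements the "virtual" round-robin numbering.  Each time the
 * search wraps, ->top advances by (max + reserved_top) and is masked
 * with ->mask, and the caller receives (bit index | top).  For example,
 * with max = 8, reserved_top = 0 and mask = 0x1f, re-allocating bit 3
 * after one wrap returns 3 | 8 = 11; both 3 and 11 map back to bit 3
 * via mlx4_bitmap_masked_value().  This keeps recycled object numbers
 * (e.g. QPNs) distinct across reuse.
 */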
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
	mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
static unsigned long find_aligned_range(unsigned long *bitmap,
					u32 start, u32 nbits,
					int len, int align, u32 skip_mask)
{
	unsigned long end, i;

again:
	start = ALIGN(start, align);

	while ((start < nbits) && (test_bit(start, bitmap) ||
				   (start & skip_mask)))
		start += align;

	if (start >= nbits)
		return -1;

	end = start + len;
	if (end > nbits)
		return -1;

	for (i = start + 1; i < end; i++) {
		if (test_bit(i, bitmap) || ((u32)i & skip_mask)) {
			start = i + 1;
			goto again;
		}
	}

	return start;
}
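
/*
 * Illustrative note (added commentary): the helper scans for "len"
 * consecutive free bits starting at an "align"-aligned index, rejecting
 * any index i for which (i & skip_mask) != 0.  E.g. with len = 2,
 * align = 2 and skip_mask = 0x4, candidate ranges are {0,1}, {2,3},
 * {8,9}, ... -- indices 4..7 are skipped because they have bit 2 set.
 */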
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
			    int align, u32 skip_mask)
{
	u32 obj;

	if (likely(cnt == 1 && align == 1 && !skip_mask))
		return mlx4_bitmap_alloc(bitmap);

	spin_lock(&bitmap->lock);

	obj = find_aligned_range(bitmap->table, bitmap->last,
				 bitmap->max, cnt, align, skip_mask);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_aligned_range(bitmap->table, 0, bitmap->max,
					 cnt, align, skip_mask);
	}

	if (obj < bitmap->max) {
		bitmap_set(bitmap->table, obj, cnt);
		if (obj == bitmap->last) {
			bitmap->last = (obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		bitmap->avail -= cnt;

	spin_unlock(&bitmap->lock);

	return obj;
}
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
{
	return bitmap->avail;
}
static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj)
{
	return obj & (bitmap->max + bitmap->reserved_top - 1);
}
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
			    int use_rr)
{
	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	if (!use_rr) {
		bitmap->last = min(bitmap->last, obj);
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
	}
	bitmap_clear(bitmap->table, obj, cnt);
	bitmap->avail += cnt;
	spin_unlock(&bitmap->lock);
}
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top)
{
	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top  = 0;
	bitmap->max  = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	bitmap->avail = num - reserved_top - reserved_bot;
	bitmap->effective_len = bitmap->avail;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
				sizeof(long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	bitmap_set(bitmap->table, 0, reserved_bot);

	return 0;
}
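
/*
 * Usage sketch (illustrative, not part of the original file).  A table
 * of 64 objects with the first two reserved might be driven like this:
 *
 *	struct mlx4_bitmap map;
 *	u32 obj;
 *	int err;
 *
 *	err = mlx4_bitmap_init(&map, 64, 64 - 1, 2, 0);
 *	if (err)
 *		return err;
 *	obj = mlx4_bitmap_alloc(&map);
 *	if (obj != (u32)-1)
 *		mlx4_bitmap_free(&map, obj, MLX4_NO_RR);
 *	mlx4_bitmap_cleanup(&map);
 *
 * MLX4_NO_RR is assumed here to be the "don't round-robin" flag from
 * mlx4.h; the first allocation returns 2, the first unreserved bit.
 */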
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}
struct mlx4_zone_allocator {
	struct list_head		entries;
	struct list_head		prios;
	u32				last_uid;
	u32				mask;
	/* protect the zone_allocator from concurrent accesses */
	spinlock_t			lock;
	enum mlx4_zone_alloc_flags	flags;
};
struct mlx4_zone_entry {
	struct list_head		list;
	struct list_head		prio_list;
	u32				uid;
	struct mlx4_zone_allocator	*allocator;
	struct mlx4_bitmap		*bitmap;
	int				use_rr;
	int				priority;
	int				offset;
	enum mlx4_zone_flags		flags;
};
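
/*
 * Note (added commentary): a zone allocator aggregates several
 * mlx4_bitmap objects ("zones") under one namespace.  Each zone gets a
 * uid and a global offset; ->entries keeps all zones sorted by
 * priority, while ->prios holds only the first zone of each distinct
 * priority, which is what the fallback walks in __mlx4_alloc_from_zone()
 * rely on.
 */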
struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags)
{
	struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL);

	if (NULL == zones)
		return NULL;

	INIT_LIST_HEAD(&zones->entries);
	INIT_LIST_HEAD(&zones->prios);
	spin_lock_init(&zones->lock);
	zones->last_uid = 0;
	zones->mask = 0;
	zones->flags = flags;

	return zones;
}
int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
		      struct mlx4_bitmap *bitmap,
		      u32 flags,
		      int priority,
		      int offset,
		      u32 *puid)
{
	u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1);
	struct mlx4_zone_entry *it;
	struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);

	if (NULL == zone)
		return -ENOMEM;

	zone->flags = flags;
	zone->bitmap = bitmap;
	zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
	zone->priority = priority;
	zone->offset = offset;

	spin_lock(&zone_alloc->lock);

	zone->uid = zone_alloc->last_uid++;
	zone->allocator = zone_alloc;

	if (zone_alloc->mask < mask)
		zone_alloc->mask = mask;

	list_for_each_entry(it, &zone_alloc->prios, prio_list)
		if (it->priority >= priority)
			break;

	if (&it->prio_list == &zone_alloc->prios || it->priority > priority)
		list_add_tail(&zone->prio_list, &it->prio_list);
	list_add_tail(&zone->list, &it->list);

	spin_unlock(&zone_alloc->lock);

	*puid = zone->uid;

	return 0;
}
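
/*
 * Note (added commentary): the loop above finds the first zone whose
 * priority is >= the new one.  The new zone joins ->prios only when it
 * introduces a previously unseen priority value; otherwise it is linked
 * into ->entries just before the first existing zone of equal or higher
 * priority.
 */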
/* Should be called under a lock */
static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
{
	struct mlx4_zone_allocator *zone_alloc = entry->allocator;

	if (!list_empty(&entry->prio_list)) {
		/* Check if we need to add an alternative node to the prio list */
		if (!list_is_last(&entry->list, &zone_alloc->entries)) {
			struct mlx4_zone_entry *next = list_first_entry(&entry->list,
									typeof(*next),
									list);

			if (next->priority == entry->priority)
				list_add_tail(&next->prio_list,
					      &entry->prio_list);
		}

		list_del(&entry->prio_list);
	}

	list_del(&entry->list);

	if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) {
		u32 mask = 0;
		struct mlx4_zone_entry *it;

		list_for_each_entry(it, &zone_alloc->prios, prio_list) {
			u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1);

			if (mask < cur_mask)
				mask = cur_mask;
		}
		zone_alloc->mask = mask;
	}

	return 0;
}
void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
{
	struct mlx4_zone_entry *zone, *tmp;

	spin_lock(&zone_alloc->lock);

	list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) {
		list_del(&zone->list);
		list_del(&zone->prio_list);
		kfree(zone);
	}

	spin_unlock(&zone_alloc->lock);
	kfree(zone_alloc);
}
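
/*
 * Note (added commentary): the allocation below first tries the
 * requested zone itself, then -- depending on the zone's flags -- falls
 * back to lower-priority zones (scanning backwards), then to other
 * zones of equal priority, and finally to higher-priority zones, in
 * that order.
 */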
/* Should be called under a lock */
static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
				  int align, u32 skip_mask, u32 *puid)
{
	u32 uid = 0;
	u32 res;
	struct mlx4_zone_allocator *zone_alloc = zone->allocator;
	struct mlx4_zone_entry *curr_node;

	res = mlx4_bitmap_alloc_range(zone->bitmap, count,
				      align, skip_mask);

	if (res != (u32)-1) {
		res += zone->offset;
		uid = zone->uid;
		goto out;
	}

	list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) {
		if (unlikely(curr_node->priority == zone->priority))
			break;
	}

	if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) {
		struct mlx4_zone_entry *it = curr_node;

		list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) {
			res = mlx4_bitmap_alloc_range(it->bitmap, count,
						      align, skip_mask);
			if (res != (u32)-1) {
				res += it->offset;
				uid = it->uid;
				goto out;
			}
		}
	}

	if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) {
		struct mlx4_zone_entry *it = curr_node;

		list_for_each_entry_from(it, &zone_alloc->entries, list) {
			if (unlikely(it == zone))
				continue;

			if (unlikely(it->priority != curr_node->priority))
				break;

			res = mlx4_bitmap_alloc_range(it->bitmap, count,
						      align, skip_mask);
			if (res != (u32)-1) {
				res += it->offset;
				uid = it->uid;
				goto out;
			}
		}
	}

	if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) {
		if (list_is_last(&curr_node->prio_list, &zone_alloc->prios))
			goto out;

		curr_node = list_first_entry(&curr_node->prio_list,
					     typeof(*curr_node),
					     prio_list);

		list_for_each_entry_from(curr_node, &zone_alloc->entries, list) {
			res = mlx4_bitmap_alloc_range(curr_node->bitmap, count,
						      align, skip_mask);
			if (res != (u32)-1) {
				res += curr_node->offset;
				uid = curr_node->uid;
				goto out;
			}
		}
	}

out:
	if (NULL != puid && res != (u32)-1)
		*puid = uid;

	return res;
}
/* Should be called under a lock */
static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
				  u32 count)
{
	mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr);
}
/* Should be called under a lock */
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid(
		struct mlx4_zone_allocator *zones, u32 uid)
{
	struct mlx4_zone_entry *zone;

	list_for_each_entry(zone, &zones->entries, list) {
		if (zone->uid == uid)
			return zone;
	}

	return NULL;
}
struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid)
{
	struct mlx4_zone_entry *zone;
	struct mlx4_bitmap *bitmap;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);

	bitmap = zone == NULL ? NULL : zone->bitmap;

	spin_unlock(&zones->lock);

	return bitmap;
}
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
{
	struct mlx4_zone_entry *zone;
	int res = 0;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);

	if (NULL == zone) {
		res = -1;
		goto out;
	}

	res = __mlx4_zone_remove_one_entry(zone);

out:
	spin_unlock(&zones->lock);
	kfree(zone);

	return res;
}
/* Should be called under a lock */
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique(
		struct mlx4_zone_allocator *zones, u32 obj)
{
	struct mlx4_zone_entry *zone, *zone_candidate = NULL;
	u32 dist = (u32)-1;

	/* Search for the smallest zone that this obj could be
	 * allocated from. This is done in order to handle
	 * situations when small bitmaps are allocated from bigger
	 * bitmaps (and the allocated space is marked as reserved in
	 * the bigger bitmap).
	 */
	list_for_each_entry(zone, &zones->entries, list) {
		if (obj >= zone->offset) {
			u32 mobj = (obj - zone->offset) & zones->mask;

			if (mobj < zone->bitmap->max) {
				u32 curr_dist = zone->bitmap->effective_len;

				if (curr_dist < dist) {
					dist = curr_dist;
					zone_candidate = zone;
				}
			}
		}
	}

	return zone_candidate;
}
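
/*
 * Illustrative example (added commentary): if zone A covers offsets
 * 0..4095 and a smaller zone B was carved out of it at offset 1024
 * with effective_len 128, an obj of 1050 matches both; B wins because
 * its effective_len is smaller, which identifies the intended owner
 * when small bitmaps live inside bigger, reserved ranges.
 */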
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
			    int align, u32 skip_mask, u32 *puid)
{
	struct mlx4_zone_entry *zone;
	int res = -1;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);

	if (NULL == zone)
		goto out;

	res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid);

out:
	spin_unlock(&zones->lock);

	return res;
}
u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
{
	struct mlx4_zone_entry *zone;
	int res = 0;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid(zones, uid);

	if (NULL == zone) {
		res = -1;
		goto out;
	}

	__mlx4_free_from_zone(zone, obj, count);

out:
	spin_unlock(&zones->lock);

	return res;
}
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
{
	struct mlx4_zone_entry *zone;
	int res;

	if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP))
		return -EFAULT;

	spin_lock(&zones->lock);

	zone = __mlx4_find_zone_by_uid_unique(zones, obj);

	if (NULL == zone) {
		res = -1;
		goto out;
	}

	__mlx4_free_from_zone(zone, obj, count);
	res = 0;

out:
	spin_unlock(&zones->lock);

	return res;
}
/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0. If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf, gfp_t gfp)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs        = 1;
		buf->npages       = 1;
		buf->page_shift   = get_order(size) + PAGE_SHIFT;
		buf->direct.buf   = dma_alloc_coherent(&dev->persist->pdev->dev,
						       size, &t, gfp);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->direct.buf  = NULL;
		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages      = buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					   gfp);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->persist->pdev->dev,
						   PAGE_SIZE, &t, gfp);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;

			pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
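
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * typically size max_direct so small queues stay physically contiguous:
 *
 *	struct mlx4_buf buf;
 *
 *	if (mlx4_buf_alloc(dev, 16384, PAGE_SIZE, &buf, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	mlx4_buf_free(dev, 16384, &buf);
 *
 * With size > max_direct the buffer is built from individual pages and,
 * on 64-bit kernels, vmap()ed so it can still be addressed linearly.
 */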
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1)
		dma_free_coherent(&dev->persist->pdev->dev, size,
				  buf->direct.buf,
				  buf->direct.map);
	else {
		if (BITS_PER_LONG == 64)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->persist->pdev->dev,
						  PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
						 gfp_t gfp)
{
	struct mlx4_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, gfp);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, gfp);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o;
	int i;

	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	i <<= o;

	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index   = i;
	db->db      = pgdir->db_page + db->index;
	db->dma     = pgdir->db_dma  + db->index * 4;
	db->order   = order;

	return 0;
}
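
/*
 * Note (added commentary): doorbell records are handed out buddy-style.
 * order0 tracks free single (order-0) records, order1 tracks free
 * aligned pairs.  Allocating an order-0 record out of a free pair
 * splits it: after i <<= o, bit i is consumed and its buddy (i ^ 1) is
 * released into order0.  mlx4_db_free() below re-merges buddies the
 * same way.
 */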
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&priv->pgdir_mutex);

	list_for_each_entry(pgdir, &priv->pgdir_list, list)
		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &priv->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&priv->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL);
	if (err)
		return err;

	*wqres->db.db = 0;

	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
	if (err)
		goto err_db;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
	mlx4_buf_free(dev, size, &wqres->buf);
err_db:
	mlx4_db_free(dev, &wqres->db);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
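
/*
 * Note (added commentary): mlx4_alloc_hwq_res() bundles the three
 * resources a HW queue needs -- a doorbell record, the queue buffer and
 * its MTT translation -- and unwinds them in reverse order on failure,
 * mirroring mlx4_free_hwq_res() below.
 */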
void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);