/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <dev/mlx4/cmd.h>

#include <linux/math64.h>

#include "mlx4.h"
#include "icm.h"

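/*
 * The buddy allocator below hands out power-of-two ranges of MTT
 * segments.  num_free[o] counts free blocks of order o, and bits[o]
 * has one bit per possible block of that order, so a set bit s at
 * order o covers segments [s << o, (s + 1) << o).
 */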
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* Split the block down to the requested order, freeing the
	 * buddy half at each level on the way.
	 */
	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

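/*
 * Freeing walks back up the orders: as long as the buddy block
 * (bit seg ^ 1) is also free, the two halves are merged and the
 * combined block is freed one order higher.
 */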
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
		if (!buddy->bits[i]) {
			/* Fall back to vmalloc for large bitmaps */
			buddy->bits[i] = vzalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

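/*
 * MTTs are allocated in segments of (1 << log_mtts_per_seg) entries;
 * the buddy tracks segments while callers deal in MTT-entry offsets,
 * hence the conversions below.
 */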
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

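/*
 * In multi-function mode, resource allocation is delegated to the
 * master via ALLOC_RES/FREE_RES commands; only the native path touches
 * the buddy and bitmap directly.  The same split recurs below for MPT
 * reservation and ICM mapping.
 */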
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		/* Physical (no-translation) region: no MTTs needed */
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	}

	mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

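/*
 * A memory key is the hardware MPT index rotated left by eight bits.
 * Bits above the num_mpts mask are free: mlx4_map_phys_fmr bumps the
 * index by dev->caps.num_mpts on each remap to hand out a fresh key
 * that still resolves to the same MPT entry.
 */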
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

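/*
 * SW2HW_MPT hands ownership of an MPT entry to the firmware; HW2SW_MPT
 * takes it back.  A NULL mailbox to HW2SW_MPT (op modifier 1) tells the
 * firmware to discard the entry's contents instead of copying them out.
 */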
/* Must protect against concurrent access */
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
		       struct mlx4_mpt_entry ***mpt_entry)
{
	int err;
	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
	struct mlx4_cmd_mailbox *mailbox = NULL;

	if (mmr->enabled != MLX4_MPT_EN_HW)
		return -EINVAL;

	err = mlx4_HW2SW_MPT(dev, NULL, key);
	if (err) {
		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
		return err;
	}

	mmr->enabled = MLX4_MPT_EN_SW;

	if (!mlx4_is_mfunc(dev)) {
		**mpt_entry = mlx4_table_find(
				&mlx4_priv(dev)->mr_table.dmpt_table,
				key, NULL);
	} else {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
				   0, MLX4_CMD_QUERY_MPT,
				   MLX4_CMD_TIME_CLASS_B,
				   MLX4_CMD_WRAPPED);
		if (err)
			goto free_mailbox;

		*mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
	}

	if (!(*mpt_entry) || !(**mpt_entry)) {
		err = -ENOMEM;
		goto free_mailbox;
	}

	return 0;

free_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);

int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
			 struct mlx4_mpt_entry **mpt_entry)
{
	int err;

	if (!mlx4_is_mfunc(dev)) {
		/* Make sure any changes to this entry are flushed */
		wmb();

		*(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;

		/* Make sure the new status is written */
		wmb();

		err = mlx4_SYNC_TPT(dev);
	} else {
		int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);

		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);

		err = mlx4_SW2HW_MPT(dev, mailbox, key);
	}

	if (!err) {
		mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
		mmr->enabled = MLX4_MPT_EN_HW;
	}
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);

void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
			struct mlx4_mpt_entry **mpt_entry)
{
	if (mlx4_is_mfunc(dev)) {
		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);
		mlx4_free_cmd_mailbox(dev, mailbox);
	}
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);

int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
			 u32 pdn)
{
	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
	/* The wrapper function will put the slave's id here */
	if (mlx4_is_mfunc(dev))
		pd_flags &= ~MLX4_MPT_PD_VF_MASK;

	mpt_entry->pd_flags = cpu_to_be32(pd_flags |
					  (pdn & MLX4_MPT_PD_MASK)
					  | MLX4_MPT_PD_FLAG_EN_INV);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);

int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
			     struct mlx4_mpt_entry *mpt_entry,
			     u32 access)
{
	u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
		    (access & MLX4_PERM_MASK);

	mpt_entry->flags = cpu_to_be32(flags);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova       = iova;
	mr->size       = size;
	mr->pd         = pd;
	mr->access     = access;
	mr->enabled    = MLX4_MPT_DISABLED;
	mr->key        = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index, gfp);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	return __mlx4_mpt_free_icm(dev, index);
}

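/*
 * An MR's lifecycle: mlx4_mr_alloc reserves an MPT index and an MTT
 * range; mlx4_mr_enable maps ICM for the entry and SW2HW_MPTs it into
 * hardware ownership; mlx4_mr_free HW2SW_MPTs it back out and releases
 * everything.
 */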
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
				  err);
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mtt_cleanup(dev, &mr->mtt);
	mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);

int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
			    u64 iova, u64 size, int npages,
			    int page_shift, struct mlx4_mpt_entry *mpt_entry)
{
	int err;

	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
	if (err)
		return err;

	mpt_entry->start       = cpu_to_be64(iova);
	mpt_entry->length      = cpu_to_be64(size);
	mpt_entry->entity_size = cpu_to_be32(page_shift);
	mpt_entry->flags    &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
					     MLX4_MPT_FLAG_SW_OWNS));
	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
		if (mr->mtt.page_shift == 0)
			mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
	}
	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}
	mr->enabled = MLX4_MPT_EN_SW;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);

int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;
	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO    |
				       MLX4_MPT_FLAG_REGION |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

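/*
 * On the native path, MTT entries live in ICM that the CPU can map, so
 * mlx4_write_mtt_chunk writes them directly, bracketing the update with
 * DMA syncs so the device sees the new entries on non-coherent
 * platforms.
 */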
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}

int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}

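/*
 * In multi-function mode the entries travel by WRITE_MTT mailbox
 * instead: quadword 0 carries the starting MTT offset, quadword 1 is
 * reserved, and the page addresses follow from quadword 2 on.
 */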
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
					       MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages      -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf, gfp_t gfp)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list,
			    gfp);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

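/*
 * Memory windows reuse the MPT machinery: type 1 windows require the
 * MEM_WINDOW device capability, type 2 the BMME type-2-win flag, and
 * neither owns an MTT range of its own.
 */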
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
		  struct mlx4_mw *mw)
{
	u32 index;

	if ((type == MLX4_MW_TYPE_1 &&
	     !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
	    (type == MLX4_MW_TYPE_2 &&
	     !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
		return -EOPNOTSUPP;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	mw->key	    = hw_index_to_key(index);
	mw->pd      = pd;
	mw->type    = type;
	mw->enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);

int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
	 * off, thus creating a memory window and not a memory region.
	 */
	mpt_entry->key	    = cpu_to_be32(key_to_hw_index(mw->key));
	mpt_entry->pd_flags = cpu_to_be32(mw->pd);
	if (mw->type == MLX4_MW_TYPE_2) {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->qpn       = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mw->key) &
			     (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mw->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);

void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	int err;

	if (mw->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mw->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

		mw->enabled = MLX4_MPT_EN_SW;
	}
	if (mw->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);

int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
			      (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

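/*
 * mlx4_map_phys_fmr remaps an FMR without a firmware command: it parks
 * the MPT in software ownership, rewrites the MTT entries and key in
 * place, then flips the MPT status byte back to hardware ownership,
 * with write barriers ordering each step.
 */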
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] |
					   MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start  = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);

	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	(void) mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
		return;
	}
	fmr->mr.enabled = MLX4_MPT_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	int ret;

	if (fmr->maps)
		return -EBUSY;

	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		return ret;
	fmr->mr.enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

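/*
 * SYNC_TPT tells the firmware to resynchronize its view of the TPT
 * after software has written MPT/MTT entries directly (see the native
 * path in mlx4_mr_hw_write_mpt above).
 */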
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);