/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <dev/mlx4/cmd.h>

#include <linux/math64.h>

#include "mlx4.h"
#include "icm.h"
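
/*
 * Editorial note (not in the original source): the MTT range allocator
 * below is a binary buddy system.  bits[o] is a bitmap of free blocks of
 * 2^o segments and num_free[o] counts them.  For example, with
 * max_order = 4 and only the single order-4 block free, an order-1
 * request splits 4 -> 3 -> 2 -> 1, leaves one free buddy at each of
 * orders 3, 2 and 1, and returns segment 0.
 */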
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* Split larger blocks down to the requested order, marking the
	 * buddy of each split block free on the way down.
	 */
	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}
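
/*
 * Editorial note: freeing mirrors allocation.  While the freed block's
 * buddy (seg ^ 1 at the current order) is also free, the pair is merged
 * and the scan moves one order up; e.g. freeing block 3 at order 0 while
 * block 2 is free produces a single free order-1 block covering 2-3.
 */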
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
		if (!buddy->bits[i])
			goto err_out_free;
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}
static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	int seg_order;
	u32 offset;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}
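
/*
 * Editorial note: MTT entries are managed in segments of
 * 2^log_mtts_per_seg entries, so an order-n request (2^n entries) uses a
 * buddy order of max(n - log_mtts_per_seg, 0).  With log_mtts_per_seg = 3,
 * an order-5 request (32 entries) needs seg_order 2 (4 segments), and a
 * returned seg of 6 corresponds to MTT offset 6 * 8 = 48.
 */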
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}
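
/*
 * Editorial note: mlx4_mtt_init() rounds npages up to a power of two;
 * the loop below computes mtt->order = ceil(log2(npages)), e.g.
 * npages = 5 yields order 3, i.e. room for 8 MTT entries.
 */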
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		/* A zero-page MTT describes a physically addressed region. */
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	}

	mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1) {
		mlx4_err(dev, "Failed to allocate mtts for %d pages (order %d)\n",
			 npages, mtt->order);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}
static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}
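
/*
 * Editorial note: the two helpers above are inverse rotations of a 32-bit
 * word by 8 bits (hw_index_to_key() rotates left, key_to_hw_index()
 * rotates right), so key_to_hw_index(hw_index_to_key(i)) == i.  This is
 * what lets mlx4_map_phys_fmr() below derive a fresh memory key for the
 * same MPT entry by adding dev->caps.num_mpts to the index; the addition
 * is masked off again whenever the index is used.
 */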
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova    = iova;
	mr->size    = size;
	mr->pd      = pd;
	mr->access  = access;
	mr->enabled = MLX4_MPT_DISABLED;
	mr->key     = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}
static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}
static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}
static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}
static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index);
}
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}
static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_free_icm(dev, index);
}
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
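
/*
 * Editorial usage sketch (not from the original source): the calls in
 * this file combine into the usual registration sequence,
 *
 *	struct mlx4_mr mr;
 *	int err;
 *
 *	err = mlx4_mr_alloc(dev, pd, iova, size, access, npages,
 *			    page_shift, &mr);
 *	if (!err)
 *		err = mlx4_write_mtt(dev, &mr.mtt, 0, npages, page_list);
 *	if (!err)
 *		err = mlx4_mr_enable(dev, &mr);	-- issues SW2HW_MPT
 *	...
 *	err = mlx4_mr_free(dev, &mr);		-- HW2SW_MPT and release
 */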
static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
			mlx4_warn(dev, "Most likely the MR has MWs bound to it\n");
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}
int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled == MLX4_MPT_EN_SW)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;
	memset(mpt_entry, 0, sizeof *mpt_entry);

	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO	 |
				       MLX4_MPT_FLAG_REGION	 |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
						  &mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages      -= chunk;
		start_index += chunk;
		page_list   += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}
	return err;
}
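
/*
 * Editorial note: in multi-function mode mlx4_write_mtt() below tunnels
 * the entries through a command mailbox.  The first two 64-bit slots hold
 * the start offset and a reserved word, so each WRITE_MTT command carries
 * at most MLX4_MAILBOX_SIZE / sizeof(u64) - 2 entries (with the usual
 * 4 KB mailbox, 4096 / 8 - 2 = 510 per chunk).
 */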
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
					       MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages      -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
		  struct mlx4_mw *mw)
{
	u32 index;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	mw->key	    = hw_index_to_key(index);
	mw->pd      = pd;
	mw->type    = type;
	mw->enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);
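
/*
 * Editorial usage sketch (not from the original source): memory windows
 * follow the same two-step alloc/enable pattern as MRs,
 *
 *	struct mlx4_mw mw;
 *	int err;
 *
 *	err = mlx4_mw_alloc(dev, pd, MLX4_MW_TYPE_2, &mw);
 *	if (!err)
 *		err = mlx4_mw_enable(dev, &mw);
 *	...
 *	mlx4_mw_free(dev, &mw);
 */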
int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
	 * off, thus creating a memory window and not a memory region.
	 */
	mpt_entry->key	    = cpu_to_be32(key_to_hw_index(mw->key));
	mpt_entry->pd_flags = cpu_to_be32(mw->pd);
	if (mw->type == MLX4_MW_TYPE_2) {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->qpn       = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mw->key) &
			     (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mw->enabled = MLX4_MPT_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);
void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	int err;

	if (mw->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mw->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

		mw->enabled = MLX4_MPT_EN_SW;
	}
	if (mw->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2(div_u64(dev->caps.num_mtts,
			      (1 << log_mtts_per_seg))));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small.\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}
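
/*
 * Editorial note: fls(reserved_mtts - 1) above rounds the reservation up
 * to the next power of two, since the buddy allocator only hands out
 * power-of-two ranges: e.g. reserved_mtts = 24 gives fls(23) = 5, an
 * order-5 (32-entry) range.
 */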
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
	fmr->mpt->start  = cpu_to_be64(iova);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
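
/*
 * Editorial usage sketch (not from the original source): an FMR is
 * remapped and invalidated entirely in software, with no firmware
 * command on the fast path,
 *
 *	u32 lkey, rkey;
 *	int err;
 *
 *	err = mlx4_map_phys_fmr(dev, fmr, page_list, npages, iova,
 *				&lkey, &rkey);
 *	...	-- post work requests using lkey/rkey
 *	mlx4_fmr_unmap(dev, fmr, &lkey, &rkey);
 */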
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -ENOMEM, ret;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);
	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		mlx4_err(dev, "Error deregistering MR. The system may have become unstable.");
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	u32 key;

	if (!fmr->maps)
		return;

	key = key_to_hw_index(fmr->mr.key) & (dev->caps.num_mpts - 1);

	*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before changing MPT fields */
	wmb();

	fmr->mr.key = hw_index_to_key(key);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = 0;
	fmr->mpt->start  = 0;

	/* Make sure MPT data is visible before changing MPT status */
	wmb();

	*(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible */
	wmb();

	fmr->maps = 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	int ret;

	if (fmr->maps)
		return -EBUSY;

	ret = mlx4_mr_free(dev, &fmr->mr);
	if (ret)
		return ret;
	fmr->mr.enabled = MLX4_MPT_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);