/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif

static int clean_mr(struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

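/*
 * Cache entries are indexed by page order relative to the smallest cached
 * order (ent[0].order, typically 2 as set up in mlx5_mr_cache_init()), so
 * translating an order to a cache index is a simple subtraction.
 */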
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

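/*
 * Completion handler for the asynchronous mkey creation issued by add_keys().
 * The firmware-assigned mkey index is combined with a per-device rolling key
 * byte, the new MR is placed on the cache free list, and the mkey is inserted
 * into the mkey radix tree so later lookups can find it.
 */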
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	spin_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_mkey_to_idx(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	spin_unlock_irqrestore(&table->lock, flags);
}

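/*
 * Pre-create up to @num mkeys for cache entry @c. Creation is asynchronous
 * (reg_mr_callback() finishes the work) and is throttled by
 * MAX_PENDING_REG_MR outstanding requests per entry.
 */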
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_MTT);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
		MLX5_SET(mkc, mkc, log_page_size, 12);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey,
					    (struct mlx5_create_mkey_mbox_in *)in,
					    inlen, reg_mr_callback, mr,
					    (struct mlx5_create_mkey_mbox_out *)mr->out);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

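/*
 * Cache maintenance policy: an entry is grown while it holds fewer than
 * twice its configured limit, and is only shrunk once it has stayed above
 * that watermark for a while and the CPU is otherwise idle (see the garbage
 * collection comment in __cache_work_func() below).
 */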
static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage collection
		 * task. Such a task is intended to be run when no other active
		 * processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and postpone
		 * the garbage collection work to try to run in the next cycle,
		 * in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

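/*
 * Take an MR from the cache. If the requested order is empty, fall back to
 * the next larger order and kick the work item so the depleted entries are
 * refilled in the background; a complete miss is accounted in ent->miss.
 */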
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}

int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey,
				    (struct mlx5_create_mkey_mbox_in *)in,
				    inlen, NULL, NULL, NULL);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

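/*
 * Each MTT entry is 8 bytes and the hardware counts translation entries in
 * 16-byte octwords, hence the (npages + 1) / 2 below.
 */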
static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}

static int use_umr(int order)
{
	return order <= MLX5_MAX_UMR_SHIFT;
}

static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int npages, int page_shift, int *size,
			  __be64 **mr_pas, dma_addr_t *dma)
{
	__be64 *pas;
	struct device *ddev = dev->ib_dev.dma_device;

	/*
	 * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more.
	 */
	*size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	*mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!(*mr_pas))
		return -ENOMEM;

	pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, *size - npages * sizeof(u64));

	*dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, *dma)) {
		kfree(*mr_pas);
		return -ENOMEM;
	}

	return 0;
}

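/*
 * The helpers below build UMR (user-mode memory registration) work requests
 * that are posted on the driver's dedicated UMR QP (dev->umrc) to create,
 * update or invalidate mkeys without issuing a firmware command.
 */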
static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
				struct ib_sge *sg, u64 dma, int n, u32 key,
				int page_shift)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	wr->next = NULL;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->npages = n;
	umrwr->page_shift = page_shift;
	umrwr->mkey = key;
}

static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);

	wr->send_flags = 0;

	umrwr->target.virt_addr = virt_addr;
	umrwr->length = len;
	umrwr->access_flags = access_flags;
	umrwr->pd = pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;
	umrwr->mkey = key;
}

static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
				   int access_flags, int *npages,
				   int *page_shift, int *ncont, int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
					   access_flags, 0);
	if (IS_ERR(umem)) {
		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(umem);
		return ERR_PTR(-EINVAL);
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return umem;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

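/*
 * Fast-path registration: take a pre-created mkey from the cache, DMA-map
 * the page list, and post a single UMR work request to point the mkey at
 * the new translation. Returns -EAGAIN when the cache cannot supply an
 * mkey so the caller can fall back to the slow path.
 */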
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size;
	__be64 *mr_pas;
	dma_addr_t dma;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
			     &dma);
	if (err)
		goto free_mr;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			 page_shift, virt_addr, len, access_flags);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed\n");
			err = -EFAULT;
		}
	}

	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	mr->live = 1;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	kfree(mr_pas);

free_mr:
	if (err && mr) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}

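/*
 * ODP helper: rewrite a window of the MR's MTTs, posting one UMR per
 * PAGE_SIZE-sized chunk of translation entries. It may be called from
 * invalidation context, hence the GFP_ATOMIC allocation and the statically
 * allocated emergency buffer fallback.
 */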
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
		       int zap)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	int size;
	__be64 *pas;
	dma_addr_t dma;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	}
	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
		err = -ENOMEM;
		goto free_pas;
	}

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		if (!zap) {
			__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
					       start_page_index, npages, pas,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages brought from the
			 * umem. */
			memset(pas + npages, 0, size - npages * sizeof(u64));
		}

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		mlx5_ib_init_umr_context(&umr_context);

		memset(&wr, 0, sizeof(wr));
		wr.wr.wr_cqe = &umr_context.cqe;

		sg.addr = dma;
		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.wr.sg_list = &sg;
		wr.wr.num_sge = 1;
		wr.wr.opcode = MLX5_IB_WR_UMR;
		wr.npages = sg.length / sizeof(u64);
		wr.page_shift = PAGE_SHIFT;
		wr.mkey = mr->mmkey.key;
		wr.target.offset = start_page_index;

		down(&umrc->sem);
		err = ib_post_send(umrc->qp, &wr.wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
		} else {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS) {
				mlx5_ib_err(dev, "UMR completion failed, code %d\n",
					    umr_context.status);
				err = -EFAULT;
			}
		}
		up(&umrc->sem);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	if (!use_emergency_buf)
		free_page((unsigned long)pas);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

	return err;
}
#endif

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
		sizeof(*pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey,
				    (struct mlx5_create_mkey_mbox_in *)in,
				    inlen, NULL, NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mr->dev = dev;
	mr->live = 1;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

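/*
 * ib_reg_user_mr entry point: pin the user range, then register it either
 * through the UMR fast path (cached mkey) when the page order allows it,
 * or through reg_create() under slow_path_mutex otherwise.
 */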
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    (long long)start, (long long)virt_addr, (long long)length, access_flags);
	umem = mr_umem_get(pd, start, length, access_flags, &npages,
			   &page_shift, &ncont, &order);
	if (IS_ERR(umem))
		return (void *)umem;

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
		err = -EINVAL;
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
		goto error;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	int err;

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	} else {
		wait_for_completion(&umr_context.done);
		up(&umrc->sem);
	}
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
		     u64 length, int npages, int page_shift, int order,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr umrwr = {};
	struct ib_sge sg;
	struct umr_common *umrc = &dev->umrc;
	dma_addr_t dma = 0;
	__be64 *mr_pas = NULL;
	int size;
	int err;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	if (flags & IB_MR_REREG_TRANS) {
		err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
				     &mr_pas, &dma);
		if (err)
			return err;

		umrwr.target.virt_addr = virt_addr;
		umrwr.length = length;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
	}

	prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			    page_shift);

	if (flags & IB_MR_REREG_PD) {
		umrwr.pd = pd;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
	}

	/* post send request to UMR QP */
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}

	up(&umrc->sem);
	if (flags & IB_MR_REREG_TRANS) {
		dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
		kfree(mr_pas);
	}
	return err;
}

int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    (long long)start, (long long)virt_addr, (long long)length, access_flags);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
				       &page_shift, &ncont, &order);
		if (IS_ERR(mr->umem)) {
			err = PTR_ERR(mr->umem);
			mr->umem = NULL;
			return err;
		}
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->umred) {
			err = unreg_umr(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to unregister MR\n");
		} else {
			err = destroy_mkey(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
		}
		if (err)
			return err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags);
		if (IS_ERR(mr))
			return PTR_ERR(mr);

		mr->umred = 0;
	} else {
		/*
		 * Send a UMR WQE
		 */
		err = rereg_umr(pd, mr, addr, len, npages, page_shift,
				order, access_flags, flags);
		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			return err;
		}
	}

	if (flags & IB_MR_REREG_PD) {
		ib_mr->pd = pd;
		mr->mmkey.pd = to_mpd(pd)->pdn;
	}

	if (flags & IB_MR_REREG_ACCESS)
		mr->access_flags = access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
		set_mr_fields(dev, mr, npages, len, access_flags);
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
	}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return 0;
}

static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - 1, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dma_device, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dma_device, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dma_device, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

static int clean_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int umred = mr->umred;
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!umred) {
		err = destroy_mkey(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmkey.key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	if (!umred)
		kfree(mr);

	return 0;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
					 ib_umem_end(umem));
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	return 0;
}

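/*
 * Allocate an MR for fast registration. IB_MR_TYPE_MEM_REG uses an MTT
 * page list, IB_MR_TYPE_SG_GAPS and IB_MR_TYPE_SIGNATURE use KLM
 * descriptors, and signature MRs additionally get a BSF and a pair of
 * PSVs for the memory and wire domains.
 */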
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(u64));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(u64);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_ACCESS_MODE_KLM;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_ACCESS_MODE_KLM;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
	MLX5_SET(mkc, mkc, umr_en, 1);

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey,
				    (struct mlx5_create_mkey_mbox_in *)in,
				    inlen, NULL, NULL, NULL);
	if (err)
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_KLM);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey,
				    (struct mlx5_create_mkey_mbox_in *)in,
				    inlen, NULL, NULL, NULL);
	if (err)
		goto free;

	mw->ibmw.rkey = mw->mmkey.key;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

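/*
 * Translate a scatterlist into KLM descriptors. Unlike the MTT page list,
 * each KLM carries its own address, byte count and lkey, which is what
 * allows SG_GAPS MRs to map buffers with arbitrary gaps.
 */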
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;
	mr->ndescs = sg_nents;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg);

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}

static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

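/*
 * ib_map_mr_sg entry point: KLM-based MRs are filled by mlx5_ib_sg_to_klms(),
 * page-list MRs go through the generic ib_sg_to_pages() with mlx5_set_page()
 * as the per-page callback. The descriptor buffer is synced for CPU access
 * while it is written and handed back to the device afterwards.
 */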
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}