/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};
#define MLX5_UMR_ALIGN 2048
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif
static int clean_mr(struct mlx5_ib_mr *mr);
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}
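
/*
 * MR cache overview
 *
 * cache->ent[i] holds a list of pre-created, free mkeys of order
 * ent[0].order + i. Registration requests are served from these lists
 * when possible, and per-entry work items grow or shrink each list
 * around its configured limit in the background.
 */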
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}
static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr has finished, to ensure that MR
		 * initialization has finished before we start handling
		 * invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif
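
/*
 * Completion callback for the asynchronous mkey creation issued by
 * add_keys(). On success, assign the key variant byte, add the MR to
 * its cache entry's free list and insert the mkey into the device's
 * radix tree. On failure, arm the delay timer so cache filling backs
 * off for a second.
 */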
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	spin_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_mkey_to_idx(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	spin_unlock_irqrestore(&table->lock, flags);
}
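
/*
 * Asynchronously create up to @num mkeys for cache entry @c. At most
 * MAX_PENDING_REG_MR creations may be outstanding per entry;
 * completions are handled in reg_mr_callback().
 */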
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_MTT);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
		MLX5_SET(mkc, mkc, log_page_size, 12);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
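
/*
 * Destroy up to @num mkeys from the head of cache entry @c's free
 * list.
 */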
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}
static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}
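
/*
 * Grow or shrink a cache entry around its watermarks: fill toward
 * twice the configured limit, and shrink only when the entry has been
 * above that level for a while and the system is otherwise idle.
 */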
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage-collection
		 * task. Such a task is intended to run when no other active
		 * processes are running.
		 *
		 * need_resched() returns true if there are user tasks to be
		 * activated in the near future.
		 *
		 * In that case we don't execute remove_keys() and postpone
		 * the garbage-collection work to the next cycle, in order to
		 * free CPU resources for other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}
static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
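
/*
 * Take a free MR from the cache, starting at the entry matching
 * @order and falling back to larger orders. Trigger background
 * refilling of any entry we touch that is empty or below its limit.
 */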
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}
static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}
static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	return 0;
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}
static int use_umr(int order)
{
	return order <= MLX5_MAX_UMR_SHIFT;
}
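
/*
 * Registrations of up to 2^MLX5_MAX_UMR_SHIFT pages go through the
 * UMR fast path using a cached mkey; larger regions take the slow
 * path through reg_create() and a firmware create_mkey command.
 */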
static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int npages, int page_shift, int *size,
			  __be64 **mr_pas, dma_addr_t *dma)
{
	__be64 *pas;
	struct device *ddev = dev->ib_dev.dma_device;

	/*
	 * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more.
	 */
	*size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	*mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!(*mr_pas))
		return -ENOMEM;

	pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, *size - npages * sizeof(u64));

	*dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, *dma)) {
		kfree(*mr_pas);
		return -ENOMEM;
	}

	return 0;
}
static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
				struct ib_sge *sg, u64 dma, int n, u32 key,
				int page_shift)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	wr->next = NULL;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->npages = n;
	umrwr->page_shift = page_shift;
	umrwr->mkey = key;
}
static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);

	wr->send_flags = 0;

	umrwr->target.virt_addr = virt_addr;
	umrwr->length = len;
	umrwr->access_flags = access_flags;
	umrwr->pd = pd;
}
static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;
	umrwr->mkey = key;
}
static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
				   int access_flags, int *npages,
				   int *page_shift, int *ncont, int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
					   access_flags, 0);
	if (IS_ERR(umem)) {
		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(umem);
		return ERR_PTR(-EINVAL);
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return umem;
}
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}
static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}
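
/*
 * Register a memory region through the UMR fast path: take a cached
 * mkey of the right order, DMA-map the page list, post a UMR work
 * request on the dedicated UMR QP and wait for its completion.
 */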
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size;
	__be64 *mr_pas;
	dma_addr_t dma;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
			     &dma);
	if (err)
		goto free_mr;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			 page_shift, virt_addr, len, access_flags);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed\n");
			err = -EFAULT;
		}
	}

	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	mr->live = 1;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	kfree(mr_pas);

free_mr:
	if (err) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}
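
/*
 * Update a range of an ODP MR's MTTs (or zap them) by posting UMR
 * work requests in chunks. Called from the page-fault and
 * invalidation paths, hence the GFP_ATOMIC allocation and the static
 * emergency-buffer fallback.
 */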
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
		       int zap)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	int size;
	__be64 *pas;
	dma_addr_t dma;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	}
	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
		err = -ENOMEM;
		goto free_pas;
	}

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		if (!zap) {
			__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
					       start_page_index, npages, pas,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages brought from the
			 * umem. */
			memset(pas + npages, 0, size - npages * sizeof(u64));
		}

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		mlx5_ib_init_umr_context(&umr_context);

		memset(&wr, 0, sizeof(wr));
		wr.wr.wr_cqe = &umr_context.cqe;

		sg.addr = dma;
		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				   MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.wr.sg_list = &sg;
		wr.wr.num_sge = 1;
		wr.wr.opcode = MLX5_IB_WR_UMR;
		wr.npages = sg.length / sizeof(u64);
		wr.page_shift = PAGE_SHIFT;
		wr.mkey = mr->mmkey.key;
		wr.target.offset = start_page_index;

		down(&umrc->sem);
		err = ib_post_send(umrc->qp, &wr.wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
		} else {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS) {
				mlx5_ib_err(dev, "UMR completion failed, code %d\n",
					    umr_context.status);
				err = -EFAULT;
			}
		}
		up(&umrc->sem);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	if (!use_emergency_buf)
		free_page((unsigned long)pas);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

	return err;
}
#endif
/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
		sizeof(*pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mr->dev = dev;
	mr->live = 1;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}
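
/*
 * Main entry point for user memory registration: pin the user pages,
 * then register them either through the UMR fast path or, for regions
 * too large for UMR (or when the cache is empty), through the slow
 * reg_create() path.
 */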
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    (long long)start, (long long)virt_addr, (long long)length, access_flags);
	umem = mr_umem_get(pd, start, length, access_flags, &npages,
			   &page_shift, &ncont, &order);
	if (IS_ERR(umem))
		return (void *)umem;

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
		err = -EINVAL;
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
		goto error;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	int err;

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	}
	wait_for_completion(&umr_context.done);
	up(&umrc->sem);

	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}
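
/*
 * Re-register an MR in place via a UMR work request, updating the
 * translation, PD and/or access flags according to @flags.
 */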
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
		     u64 length, int npages, int page_shift, int order,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr umrwr = {};
	struct ib_sge sg;
	struct umr_common *umrc = &dev->umrc;
	dma_addr_t dma = 0;
	__be64 *mr_pas = NULL;
	int size;
	int err;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	if (flags & IB_MR_REREG_TRANS) {
		err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
				     &mr_pas, &dma);
		if (err)
			return err;

		umrwr.target.virt_addr = virt_addr;
		umrwr.length = length;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
	}

	prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			    page_shift);

	if (flags & IB_MR_REREG_PD) {
		umrwr.pd = pd;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
	}

	/* post send request to UMR QP */
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);

	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}

	up(&umrc->sem);
	if (flags & IB_MR_REREG_TRANS) {
		dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
		kfree(mr_pas);
	}
	return err;
}
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    (long long)start, (long long)virt_addr, (long long)length, access_flags);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
				       &page_shift, &ncont, &order);
		if (IS_ERR(mr->umem)) {
			err = PTR_ERR(mr->umem);
			mr->umem = NULL;
			return err;
		}
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->umred) {
			err = unreg_umr(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to unregister MR\n");
		} else {
			err = destroy_mkey(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
		}
		if (err)
			return err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags);

		if (IS_ERR(mr))
			return PTR_ERR(mr);

		mr->umred = 0;
	} else {
		/*
		 * Send a UMR WQE
		 */
		err = rereg_umr(pd, mr, addr, len, npages, page_shift,
				order, access_flags, flags);
		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			return err;
		}
	}

	if (flags & IB_MR_REREG_PD) {
		ib_mr->pd = pd;
		mr->mmkey.pd = to_mpd(pd)->pdn;
	}

	if (flags & IB_MR_REREG_ACCESS)
		mr->access_flags = access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
		set_mr_fields(dev, mr, npages, len, access_flags);
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
	}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return 0;
}
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - 1, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dma_device, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dma_device, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dma_device, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}
static int clean_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int umred = mr->umred;
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!umred) {
		err = destroy_mkey(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmkey.key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	if (!umred)
		kfree(mr);

	return 0;
}
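
/*
 * Release an MR. For ODP MRs the umem must be torn down first so that
 * no page-fault or invalidation handler can still reference the MR
 * while it is being destroyed.
 */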
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
					 ib_umem_end(umem));
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	return 0;
}
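
/*
 * Allocate an MR for fast registration. Three types are supported:
 * MTT-based MRs for regular page lists, KLM-based MRs for scatter
 * lists with gaps, and signature MRs carrying a pair of PSVs for
 * signature offload.
 */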
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(u64));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(u64);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_ACCESS_MODE_KLM;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_ACCESS_MODE_KLM;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
	MLX5_SET(mkc, mkc, umr_en, 1);

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_KLM);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->ibmw.rkey = mw->mmkey.key;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}
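
/*
 * Translate a scatterlist into the MR's KLM descriptor list. Unlike
 * the MTT path, each KLM carries an arbitrary byte count, so gaps
 * between entries are allowed.
 */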
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;
	mr->ndescs = sg_nents;

	for_each_sg(sgl, sg, sg_nents, i) {
		/* do not write past the end of klms[] */
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg);

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}