/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef MLX5_IB_H
#define	MLX5_IB_H
#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_addr.h>
#include <dev/mlx5/device.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/types.h>
#include <dev/mlx5/mlx5_core/transobj.h>
#define	mlx5_ib_dbg(dev, format, arg...)				\
	pr_debug("mlx5_dbg:%s:%s:%d:(pid %d): " format,			\
	    (dev)->ib_dev.name, __func__, __LINE__,			\
	    curthread->td_proc->p_pid, ##arg)

#define	mlx5_ib_err(dev, format, arg...)				\
	printf("mlx5_ib: ERR: ""mlx5_err:%s:%s:%d:(pid %d): " format,	\
	    (dev)->ib_dev.name, __func__, __LINE__,			\
	    curthread->td_proc->p_pid, ##arg)

#define	mlx5_ib_warn(dev, format, arg...)				\
	printf("mlx5_ib: WARN: ""mlx5_warn:%s:%s:%d:(pid %d): " format,	\
	    (dev)->ib_dev.name, __func__, __LINE__,			\
	    curthread->td_proc->p_pid, ##arg)
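/*
 * Illustrative usage (a sketch, not part of the original header):
 *
 *	mlx5_ib_warn(dev, "unexpected opcode 0x%x\n", opcode);
 *
 * Each macro tags the message with the device name, the function, the
 * line number and the pid of the current thread's process.
 */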
extern struct workqueue_struct *mlx5_ib_wq;
enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};
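/*
 * A user mmap() offset carries a command in the bits above
 * MLX5_IB_MMAP_CMD_SHIFT.  A minimal decoding sketch (the helper name
 * is illustrative; it is not declared by this header):
 *
 *	static inline int get_command(unsigned long offset)
 *	{
 *		return (offset >> MLX5_IB_MMAP_CMD_SHIFT) &
 *		    MLX5_IB_MMAP_CMD_MASK;
 *	}
 */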
enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	MLX5_IB_MMAP_WC_PAGE			= 2,
	MLX5_IB_MMAP_NC_PAGE			= 3,
	MLX5_IB_MMAP_MAP_DC_INFO_PAGE		= 4,

	/* Use EXP mmap commands until they are pushed to upstream */
	MLX5_IB_EXP_MMAP_CORE_CLOCK				= 0xFB,
	MLX5_IB_EXP_MMAP_GET_CONTIGUOUS_PAGES_CPU_NUMA		= 0xFC,
	MLX5_IB_EXP_MMAP_GET_CONTIGUOUS_PAGES_DEV_NUMA		= 0xFD,
	MLX5_IB_EXP_ALLOC_N_MMAP_WC				= 0xFE,
};
enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};
enum {
	MLX5_DCT_CS_RES_64		= 2,
	MLX5_CNAK_RX_POLL_CQ_QUOTA	= 256,
};
enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};
enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};
enum {
	MLX5_CROSS_CHANNEL_UUAR		= 0,
};
enum {
	MLX5_IB_MAX_CTX_DYNAMIC_UARS	= 256,
	MLX5_IB_INVALID_UAR_INDEX	= -1U
};
enum {
	MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES	= 13,
	MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES	= 6,
	MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES	= 16,
	MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES	= 9,
};
struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
	u32			dynamic_wc_uar_index[MLX5_IB_MAX_CTX_DYNAMIC_UARS];
	/* Transport Domain number */
	u32			tdn;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u32			pa_lkey;
};

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_swr_ctx {
	u64		wrid;
	u32		wr_data;
	struct wr_list	w_list;
	u32		wqe_head;
	u8		sig_piped;
};
struct mlx5_rwr_ctx {
	u64		wrid;
};
struct mlx5_ib_wq {
	struct mlx5_swr_ctx	*swr_ctx;
	struct mlx5_rwr_ctx	*rwr_ctx;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
};
struct mlx5_ib_sqd {
	struct mlx5_ib_qp	*qp;
	struct work_struct	work;
};
struct mlx5_ib_mc_flows_list {
	struct list_head	flows_list;
	/* Protect the flows_list */
	struct mutex		lock;
};
struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_core_qp	mrq;
	struct mlx5_core_qp	msq;
	struct mlx5_ib_wq	rq;
	int			sq_max_wqes_per_wr;
	struct mlx5_ib_wq	sq;

	struct ib_umem	       *umem;

	/* A Raw Ethernet QP's SQ is allocated separately
	 * from the RQ's buffer in user-space.
	 */
	struct ib_umem	       *sq_umem;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;

	/* Raw Ethernet QP's SQ and RQ states */
	u8			rq_state;
	u8			sq_state;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	/* Store signature errors */
	bool			signature_en;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;

	struct mlx5_ib_mc_flows_list mc_flows_list;
};
struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem	       *umem;
	int			cqe_size;
};
enum mlx5_ib_qp_flags {
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= 1 << 0,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 1,
	MLX5_IB_QP_CAP_RX_END_PADDING		= 1 << 5,
};
struct mlx5_umr_wr {
	u64			virt_addr;
	struct ib_pd	       *pd;
	unsigned int		page_shift;
	unsigned int		npages;
	u32			length;
	int			access_flags;
	u32			mkey;
};
struct mlx5_shared_mr_info {
	int			mr_id;
	struct ib_umem	       *umem;
};
struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
};
struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};
struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};
enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define	MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mr	mmr;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	dma_addr_t		dma;
	int			npages;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx *sig;
	u32			max_reg_descriptors;
	u64			size;
	u64			page_count;
	struct mlx5_ib_mr     **children;
	int			nchild;
};
struct mlx5_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list	ibfrpl;
	__be64			       *mapped_page_list;
	dma_addr_t			map;
};
struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};
static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->status = -1;
	init_completion(&context->done);
}
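/*
 * Typical flow (an illustrative sketch, not part of the original
 * header): the caller initializes the context, posts a UMR work
 * request whose completion handler sets ->status and completes
 * ->done, and then sleeps:
 *
 *	struct mlx5_ib_umr_context umr_context;
 *
 *	mlx5_ib_init_umr_context(&umr_context);
 *	... post the UMR work request referencing &umr_context ...
 *	wait_for_completion(&umr_context.done);
 *	if (umr_context.status != IB_WC_SUCCESS)
 *		... handle the failed registration ...
 */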
struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	struct ib_mr	*mr;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

struct mlx5_ib_fmr {
	struct ib_fmr			ibfmr;
	struct mlx5_core_mr		mr;
	struct ib_send_wr		wr[2];
	struct ib_fast_reg_page_list	page_list;
};
struct cache_order {
	struct kobject		kobj;
	int			order;
	int			index;
	struct mlx5_ib_dev     *dev;
};
struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;
	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
	struct cache_order	co;
};
struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
};
struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
};
struct mlx5_dc_tracer {
	struct page    *pg;
	dma_addr_t	dma;
	int		size;
	int		order;
};
struct mlx5_dc_desc {
	dma_addr_t	dma;
	void	       *buf;
};
struct mlx5_mlx_wr {
	u8	sl;
	u16	dlid;
	int	icrc;
};

struct mlx5_send_wr {
	struct ib_send_wr	wr;
	union {
		struct mlx5_mlx_wr	mlx;
	} sel;
};
struct mlx5_dc_data {
	struct ib_mr	       *mr;
	struct ib_qp	       *dcqp;
	struct ib_cq	       *rcq;
	struct ib_cq	       *scq;
	unsigned int		rx_npages;
	unsigned int		tx_npages;
	struct mlx5_dc_desc    *rxdesc;
	struct mlx5_dc_desc    *txdesc;
	unsigned int		max_wqes;
	unsigned int		cur_send;
	unsigned int		last_send_completed;
	int			tx_pending;
	struct mlx5_ib_dev     *dev;
	int			port;
	int			initialized;
	struct kobject		kobj;
	unsigned long		connects;
	unsigned long		cnaks;
	unsigned long		discards;
	struct ib_wc		wc_tbl[MLX5_CNAK_RX_POLL_CQ_QUOTA];
};
struct mlx5_ib_port_sysfs_group {
	struct kobject		kobj;
	bool			enabled;
	struct attribute_group	counters;
};

#define	MLX5_IB_GID_MAX	16

struct mlx5_ib_port {
	struct mlx5_ib_dev     *dev;
	u8			port_num;	/* 0 based */
	u8			port_gone;	/* set when gone */
	struct mlx5_ib_port_sysfs_group group;
	union ib_gid		gid_table[MLX5_IB_GID_MAX];
};
struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev	       *mdev;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);

	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;

	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mutex			slow_path_mutex;
	int				enable_atomic_resp;
	enum ib_atomic_cap		atomic_cap;
	struct mlx5_mr_cache		cache;
	struct kobject			mr_cache;
	/* protect resources needed as part of reset flow */
	spinlock_t			reset_flow_resource_lock;
	struct list_head		qp_list;
	struct timer_list		delay_timer;

	struct mlx5_dc_tracer		dctr;
	struct mlx5_dc_data		dcd[MLX5_MAX_PORTS];
	struct kobject		       *dc_kobj;
	/* Array with num_ports elements */
	struct mlx5_ib_port	       *port;
	struct kobject		       *ports_parent;
};
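/*
 * The to_*() helpers below all follow the same pattern: given a
 * pointer to an embedded member (e.g. the ib_cq inside a mlx5_ib_cq),
 * recover the enclosing mlx5 structure with container_of().
 * Illustrative use:
 *
 *	struct mlx5_ib_cq *cq = to_mcq(ibcq);
 */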
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp, mqp);
}

static inline struct mlx5_ib_qp *sq_to_mibqp(struct mlx5_core_qp *msq)
{
	return container_of(msq, struct mlx5_ib_qp, msq);
}

static inline struct mlx5_ib_qp *rq_to_mibqp(struct mlx5_core_qp *mrq)
{
	return container_of(mrq, struct mlx5_ib_qp, mrq);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
{
	return container_of(mmr, struct mlx5_ib_mr, mmr);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
	return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, uintptr_t virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad);
struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev, struct ib_ah_attr *ah_attr,
			   struct mlx5_ib_ah *ah, enum rdma_link_layer ll);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, struct ib_cq_attr *attr, int cq_attr_mask);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata, int mr_id);
struct ib_mr *mlx5_ib_reg_phys_mr(struct ib_pd *pd,
				  struct ib_phys_buf *buffer_list,
				  int num_phys_buf,
				  int access_flags,
				  u64 *iova_start);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
int mlx5_ib_destroy_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len);
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len);
void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr);
int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova);
int mlx5_ib_unmap_fmr(struct list_head *fmr_list);
int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_smp_attr_node_info_mad_ifc(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_system_image_guid_mad_ifc(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_max_pkeys_mad_ifc(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_vendor_id_mad_ifc(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_pkey_mad_ifc(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_node_desc_mad_ifc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_node_guid_mad_ifc(struct mlx5_ib_dev *dev, u64 *node_guid);
int mlx5_query_gids_mad_ifc(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_port_mad_ifc(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int umr);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_query_port_roce(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props);
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port, int index,
			       __be16 ah_udp_s_port);
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port,
			   int index, int *gid_type);
struct net_device *mlx5_ib_get_netdev(struct ib_device *ib_dev, u8 port);
int modify_gid_roce(struct ib_device *ib_dev, u8 port, unsigned int index,
		    const union ib_gid *gid, struct net_device *ndev);
int query_gid_roce(struct ib_device *ib_dev, u8 port, int index,
		   union ib_gid *gid);
int mlx5_process_mad_mad_ifc(struct ib_device *ibdev, int mad_flags,
			     u8 port_num, struct ib_wc *in_wc,
			     struct ib_grh *in_grh, struct ib_mad *in_mad,
			     struct ib_mad *out_mad);
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
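/*
 * Typical use (an illustrative sketch, not part of the original
 * header): zero an ib_smp, stamp the common header fields, then set
 * the attribute to query before handing it to the MAD interface:
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 *	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
 */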
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
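/*
 * For example, IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ converts
 * to MLX5_PERM_LOCAL_WRITE | MLX5_PERM_REMOTE_READ |
 * MLX5_PERM_LOCAL_READ; local read permission is always granted.
 */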
#define	MLX5_MAX_UMR_SHIFT	16
#define	MLX5_MAX_UMR_PAGES	(1 << MLX5_MAX_UMR_SHIFT)
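/*
 * Illustrative arithmetic: with 4KB pages, a single UMR operation can
 * cover at most (1 << 16) * 4KB = 256MB of registered memory.
 */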
#endif /* MLX5_IB_H */