/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/types.h>
#include <dev/mlx5/mlx5_core/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#define mlx5_ib_dbg(dev, format, arg...)				\
	pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name,	\
		 __func__, __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
	pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name,	\
	       __func__, __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
	pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name,	\
		__func__, __LINE__, current->pid, ##arg)
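
/*
 * Illustrative use of the logging macros above (hypothetical call site);
 * each message is prefixed with the device name, function, line number and
 * the calling pid:
 *
 *	mlx5_ib_warn(dev, "create CQ failed, err %d\n", err);
 */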
#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
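
/*
 * Illustrative note (not part of the ABI): field_avail() answers "does a
 * user command buffer of size sz extend far enough to contain field fld?".
 * For a hypothetical struct cmd { u32 a; u32 b; },
 * field_avail(struct cmd, b, 8) is true while field_avail(struct cmd, b, 4)
 * is false, which lets one handler accept both old and new ABI layouts.
 */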

#define MLX5_IB_DEFAULT_UIDX		0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK	__mlx5_mask(qpc, user_index)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};
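
/*
 * Illustrative sketch (assuming the usual libmlx5 offset encoding): the
 * mmap command is carried in the bits above MLX5_IB_MMAP_CMD_SHIFT of the
 * page offset, and the command argument in the bits below it:
 *
 *	cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
 *	arg = vma->vm_pgoff & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
 */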

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	MLX5_IB_MMAP_WC_PAGE			= 2,
	MLX5_IB_MMAP_NC_PAGE			= 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_UUAR		= 0,
};

struct mlx5_ib_vma_private_data {
	struct list_head	list;
	struct vm_area_struct	*vma;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
	struct list_head	vma_private_list;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
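
/*
 * All of the to_*() helpers in this header follow the pattern shown above:
 * container_of() recovers the driver-private wrapper from a pointer to the
 * embedded core/uverbs object. Hypothetical example:
 *
 *	struct mlx5_ib_ucontext *ctx = to_mucontext(ibqp->uobject->context);
 */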

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_rule		*rule;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when add/del flow rules.
	 * Only a single add/removal of a flow steering rule may be
	 * in progress at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver use.
 */

#define MLX5_IB_SEND_UMR_UNREG		IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	(IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT	(IB_SEND_RESERVED_START << 2)

#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_PD		(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_ACCESS		IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
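
/*
 * Hypothetical call site: a driver-internal UD QP that must send with
 * source QP number 1 would be created roughly as follows:
 *
 *	init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
 *	qp = mlx5_ib_create_qp(pd, init_attr, udata);
 */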

struct mlx5_ib_wq {
	struct wr_list	       *w_list;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
};

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	struct ib_umem		*umem;
	unsigned int		page_shift;
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per-QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}
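
/*
 * Note: the two flag bits select one of the four contexts above directly.
 * Assuming MLX5_PFAULT_REQUESTOR is bit 0 and MLX5_PFAULT_WRITE is bit 1
 * (which the enum ordering implies), a fault flagged
 * MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE maps to
 * MLX5_IB_PAGEFAULT_REQUESTOR_WRITE.
 */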

struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
};

struct mlx5_ib_rss_qp {
	u32			tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq	sq;
	struct mlx5_ib_rq	rq;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
	};
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QP's that are in a state that doesn't
	 * allow page faults, and shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing for a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
};

struct mlx5_ib_cq_buf {
	struct ib_umem		*umem;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
};

struct mlx5_umr_wr {
	struct ib_send_wr	wr;
	unsigned int		page_shift;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	struct ib_umem		*umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf	*resize_buf;
	struct ib_umem		*resize_umem;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	struct list_head	wc_list;
	enum ib_cq_notify_flags	notify_flags;
	struct work_struct	notify_work;
};

struct mlx5_ib_wc {
	struct ib_wc		wc;
	struct list_head	list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;

	/* protect SRQ handling
	 */
	spinlock_t		lock;
	struct ib_umem		*umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mr	mmkey;
	struct ib_umem		*umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	struct mlx5_ib_dev	*dev;
	u32			out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx	*sig;
	int			access_flags; /* Needed for rereg MR */
};

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mr	mmkey;
};

struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *fsize;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp	*gsi;
	struct work_struct	pkey_change_work;
};

struct mlx5_ib_resources {
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex		mutex;
};

struct mlx5_ib_port {
	u16			q_cnt_id;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer.
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
};

#define	MLX5_IB_STATS_COUNT(a, b, c, d) a
#define	MLX5_IB_STATS_VAR(a, b, c, d) b;
#define	MLX5_IB_STATS_DESC(a, b, c, d) c, d,

#define	MLX5_IB_CONG_PARAMS(m) \
  /* ECN RP */ \
  m(+1, u64 rp_clamp_tgt_rate, "rp_clamp_tgt_rate", "If set, whenever a CNP is processed, the target rate is updated to be the current rate") \
  m(+1, u64 rp_clamp_tgt_rate_ati, "rp_clamp_tgt_rate_ati", "If set, when receiving a CNP, the target rate should be updated if the transmission rate was increased due to the timer, and not only due to the byte counter") \
  m(+1, u64 rp_time_reset, "rp_time_reset", "Time in microseconds between rate increases if no CNPs are received") \
  m(+1, u64 rp_byte_reset, "rp_byte_reset", "Transmitted data in bytes between rate increases if no CNPs are received. A value of zero means disabled.") \
  m(+1, u64 rp_threshold, "rp_threshold", "The number of times rpByteStage or rpTimeStage can count before the RP rate control state machine advances states") \
  m(+1, u64 rp_ai_rate, "rp_ai_rate", "The rate, in Mbits per second, used to increase rpTargetRate in the active increase state") \
  m(+1, u64 rp_hai_rate, "rp_hai_rate", "The rate, in Mbits per second, used to increase rpTargetRate in the hyper increase state") \
  m(+1, u64 rp_min_dec_fac, "rp_min_dec_fac", "The minimum factor by which the current transmit rate can be changed when processing a CNP. Value is given as a percentage, [1 .. 100]") \
  m(+1, u64 rp_min_rate, "rp_min_rate", "The minimum rate, in Mbits per second, that a flow can be limited to") \
  m(+1, u64 rp_rate_to_set_on_first_cnp, "rp_rate_to_set_on_first_cnp", "The rate that is set for the flow when a rate limiter is allocated to it upon first CNP received, in Mbits per second. A value of zero means use full port speed") \
  m(+1, u64 rp_dce_tcp_g, "rp_dce_tcp_g", "Used to update the congestion estimator, alpha, once every dce_tcp_rtt microseconds") \
  m(+1, u64 rp_dce_tcp_rtt, "rp_dce_tcp_rtt", "The time between updates of the alpha value, in microseconds") \
  m(+1, u64 rp_rate_reduce_monitor_period, "rp_rate_reduce_monitor_period", "The minimum time between two consecutive rate reductions for a single flow") \
  m(+1, u64 rp_initial_alpha_value, "rp_initial_alpha_value", "The initial value of alpha to use when receiving the first CNP for a flow") \
  m(+1, u64 rp_gd, "rp_gd", "If a CNP is received, the flow rate is reduced at the beginning of the next rate_reduce_monitor_period interval") \
  /* ECN NP */ \
  m(+1, u64 np_cnp_dscp, "np_cnp_dscp", "The DiffServ Code Point of the generated CNP for this port") \
  m(+1, u64 np_cnp_prio_mode, "np_cnp_prio_mode", "The 802.1p priority mode of the generated CNP for this port") \
  m(+1, u64 np_cnp_prio, "np_cnp_prio", "The 802.1p priority value of the generated CNP for this port")

#define	MLX5_IB_CONG_PARAMS_NUM (0 MLX5_IB_CONG_PARAMS(MLX5_IB_STATS_COUNT))
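
/*
 * Illustrative expansion of the x-macro pattern above: every m(...) entry
 * supplies (count, variable, name, description), and each MLX5_IB_STATS_*
 * helper keeps exactly one slot. For a single hypothetical entry
 * m(+1, u64 foo, "foo", "desc"):
 *
 *	MLX5_IB_STATS_COUNT -> +1              (so _NUM sums to the entry count)
 *	MLX5_IB_STATS_VAR   -> u64 foo;        (declares the backing field)
 *	MLX5_IB_STATS_DESC  -> "foo", "desc",  (builds a name/help string table)
 */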

#define	MLX5_IB_CONG_STATS(m) \
  m(+1, u64 syndrome, "syndrome", "Syndrome number") \
  m(+1, u64 rp_cur_flows, "rp_cur_flows", "Number of flows limited") \
  m(+1, u64 sum_flows, "sum_flows", "Sum of the number of flows limited over time") \
  m(+1, u64 rp_cnp_ignored, "rp_cnp_ignored", "Number of CNPs and CNMs ignored") \
  m(+1, u64 rp_cnp_handled, "rp_cnp_handled", "Number of CNPs and CNMs successfully handled") \
  m(+1, u64 time_stamp, "time_stamp", "Time stamp in microseconds") \
  m(+1, u64 accumulators_period, "accumulators_period", "The value of X variable for accumulating counters") \
  m(+1, u64 np_ecn_marked_roce_packets, "np_ecn_marked_roce_packets", "Number of ECN marked packets seen") \
  m(+1, u64 np_cnp_sent, "np_cnp_sent", "Number of CNPs sent")

#define	MLX5_IB_CONG_STATS_NUM (0 MLX5_IB_CONG_STATS(MLX5_IB_STATS_COUNT))

struct mlx5_ib_congestion {
	struct sysctl_ctx_list	ctx;
	struct delayed_work	dwork;

	MLX5_IB_CONG_PARAMS(MLX5_IB_STATS_VAR)
	MLX5_IB_CONG_STATS(MLX5_IB_STATS_VAR)
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps		odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct		mr_srcu;
#endif
	struct mlx5_ib_flow_db		flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t			reset_flow_resource_lock;
	struct list_head		qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port		*port;
	struct mlx5_ib_congestion	congestion;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
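
/*
 * Typical (hypothetical) use when building a SubnGet MAD:
 *
 *	struct ib_smp *in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 */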

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
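
/*
 * Example conversion (follows directly from the expression above): local
 * read access is always granted, so a caller passing
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ gets back
 * MLX5_PERM_LOCAL_WRITE | MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ.
 */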

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
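
/*
 * Sizing note: MLX5_MAX_UMR_PAGES is 1 << 16 = 65536 pages, so with 4 KiB
 * pages a single UMR operation can cover at most a 256 MiB mapping.
 */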

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * It returns non-zero value for unsupported CQ
	 * create flags, otherwise it returns zero.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
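
/*
 * Sketch of the cases above: under cqe_version 0, a command buffer that
 * still carries the uidx field is accepted only when uidx holds
 * MLX5_IB_DEFAULT_UIDX; otherwise the presence of the field must match the
 * negotiated cqe_version exactly (a mismatch yields -EINVAL), and under
 * cqe_version 1 the index is validated against MLX5_USER_ASSIGNED_UIDX_MASK
 * by verify_assign_uidx(). The SRQ variant below applies the same rules to
 * struct mlx5_ib_create_srq.
 */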

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

void mlx5_ib_cleanup_congestion(struct mlx5_ib_dev *);
int mlx5_ib_init_congestion(struct mlx5_ib_dev *);

#endif /* MLX5_IB_H */