 * Copyright (c) 2018-2019 Cavium, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: David C Somayajulu
41 #include <linux/list.h>
42 #include <linux/spinlock.h>
43 #include <linux/idr.h>
44 #include <linux/completion.h>
45 #include <linux/netdevice.h>
46 #include <linux/sched.h>
47 #include <linux/pci.h>
48 #include <linux/dma-mapping.h>
49 #include <linux/wait.h>
50 #include <linux/kref.h>
51 #include <linux/timer.h>
56 #include <asm/byteorder.h>
58 #include <netinet/in.h>
60 #include <netinet/toecore.h>
62 #include <rdma/ib_smi.h>
63 #include <rdma/ib_user_verbs.h>
64 #include <rdma/ib_addr.h>
65 #include <rdma/ib_verbs.h>
66 #include <rdma/iw_cm.h>
67 #include <rdma/ib_umem.h>
68 #include <rdma/ib_mad.h>
69 #include <rdma/ib_sa.h>
71 #if __FreeBSD_version < 1100000
79 #include "ecore_gtt_reg_addr.h"
81 #include "ecore_chain.h"
82 #include "ecore_status.h"
84 #include "ecore_rt_defs.h"
85 #include "ecore_init_ops.h"
86 #include "ecore_int.h"
87 #include "ecore_cxt.h"
88 #include "ecore_spq.h"
89 #include "ecore_init_fw_funcs.h"
90 #include "ecore_sp_commands.h"
91 #include "ecore_dev_api.h"
92 #include "ecore_l2_api.h"
93 #ifdef CONFIG_ECORE_SRIOV
94 #include "ecore_sriov.h"
97 #ifdef CONFIG_ECORE_LL2
98 #include "ecore_ll2.h"
100 #ifdef CONFIG_ECORE_FCOE
101 #include "ecore_fcoe.h"
103 #ifdef CONFIG_ECORE_ISCSI
104 #include "ecore_iscsi.h"
106 #include "ecore_mcp.h"
107 #include "ecore_hw_defs.h"
108 #include "mcp_public.h"
110 #ifdef CONFIG_ECORE_RDMA
111 #include "ecore_rdma.h"
112 #include "ecore_rdma_api.h"
115 #ifdef CONFIG_ECORE_ROCE
116 #include "ecore_roce.h"
119 #ifdef CONFIG_ECORE_IWARP
120 #include "ecore_iwarp.h"
123 #include "ecore_iro.h"
126 #include "ecore_dbg_fw_funcs.h"
127 #include "rdma_common.h"
129 #include "qlnx_ioctl.h"
130 #include "qlnx_def.h"
131 #include "qlnx_rdma.h"
132 #include "qlnxr_verbs.h"
133 #include "qlnxr_user.h"
134 #include "qlnx_ver.h"
137 #define QLNXR_ROCE_INTERFACE_VERSION 1801
139 #define QLNXR_MODULE_VERSION "8.18.1.0"
140 #define QLNXR_NODE_DESC "QLogic 579xx RoCE HCA"
142 #define OC_SKH_DEVICE_PF 0x720
143 #define OC_SKH_DEVICE_VF 0x728
144 #define QLNXR_MAX_AH 512
/* QLNXR Limitations */

/*
 * An S/RQ PBL contains a list of pointers to pages. Each page contains S/RQE
 * elements. Several S/RQE elements make an S/RQE, up to a certain maximum that
 * is different between SQ and RQ. The size of the PBL was chosen such as not to
 * limit the MAX_WR supported by ECORE, and rounded up to a power of two.
 */
155 #define QLNXR_MAX_SQ_PBL (0x8000) /* 2^15 bytes */
156 #define QLNXR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *)) /* number */
157 #define QLNXR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge)) /* bytes */
158 #define QLNXR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
159 QLNXR_SQE_ELEMENT_SIZE) /* number */
160 #define QLNXR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
161 QLNXR_SQE_ELEMENT_SIZE) /* number */
162 #define QLNXR_MAX_SQE ((QLNXR_MAX_SQ_PBL_ENTRIES) * (RDMA_RING_PAGE_SIZE) / \
163 (QLNXR_SQE_ELEMENT_SIZE) / (QLNXR_MAX_SQE_ELEMENTS_PER_SQE))
165 #define QLNXR_MAX_RQ_PBL (0x2000) /* 2^13 bytes */
166 #define QLNXR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *)) /* number */
167 #define QLNXR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge)) /* bytes */
168 #define QLNXR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE) /* number */
169 #define QLNXR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
170 QLNXR_RQE_ELEMENT_SIZE) /* number */
171 #define QLNXR_MAX_RQE ((QLNXR_MAX_RQ_PBL_ENTRIES) * (RDMA_RING_PAGE_SIZE) / \
172 (QLNXR_RQE_ELEMENT_SIZE) / (QLNXR_MAX_RQE_ELEMENTS_PER_RQE))
/*
 * Although FW supports two-layer PBL we use a single layer since it is more
 * than enough. For that layer we use a maximum size of 512 kB, again, because
 * it reaches the maximum number of page pointers. Notice the '-1' in the
 * calculation that comes from having a u16 for the number of pages, i.e. 0xffff
 * is the maximum number of pages (in single layer).
 */
181 #define QLNXR_CQE_SIZE (sizeof(union rdma_cqe))
182 #define QLNXR_MAX_CQE_PBL_SIZE (512*1024) /* 512kB */
183 #define QLNXR_MAX_CQE_PBL_ENTRIES (((QLNXR_MAX_CQE_PBL_SIZE) / \
184 sizeof(u64)) - 1) /* 64k -1 */
185 #define QLNXR_MAX_CQES ((u32)((QLNXR_MAX_CQE_PBL_ENTRIES) * (ECORE_CHAIN_PAGE_SIZE)\
186 / QLNXR_CQE_SIZE)) /* 8M -4096/32 = 8,388,480 */
/* CNQ size limitation:
 * The maximum CNQ size is not reachable because the FW supports a chain of u16
 * (specifically 64k-1). The FW can buffer CNQ elements, avoiding an overflow, at
 * the expense of performance. Hence we set it to an arbitrarily smaller value.
 */
#define QLNXR_ROCE_MAX_CNQ_SIZE		(0x4000) /* 2^14 */
196 #define QLNXR_MAX_PORT (1)
197 #define QLNXR_PORT (1)
199 #define QLNXR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
/* Combine two 32-bit halves into one 64-bit value. Arguments are
 * parenthesized so that expression arguments expand safely. */
#define convert_to_64bit(lo, hi) (((u64)(hi) << 32) | (u64)(lo))
/* The following numbers are used to determine if a handle received from the FW
 * actually points to a CQ/QP.
 */
206 #define QLNXR_CQ_MAGIC_NUMBER (0x11223344)
207 #define QLNXR_QP_MAGIC_NUMBER (0x77889900)
209 /* Fast path debug prints */
210 #define FP_DP_VERBOSE(...)
211 /* #define FP_DP_VERBOSE(...) DP_VERBOSE(__VA_ARGS__) */
213 #define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE)
/* Debug-print module mask bits. No trailing commas: these macros are used
 * inside expressions (see QLNXR_MSG_QP below), so a stray ',' in the
 * expansion would break every such use. */
#define QLNXR_MSG_INIT		0x10000
#define QLNXR_MSG_FAIL		0x10000
#define QLNXR_MSG_CQ		0x20000
#define QLNXR_MSG_RQ		0x40000
#define QLNXR_MSG_SQ		0x80000
#define QLNXR_MSG_QP		(QLNXR_MSG_SQ | QLNXR_MSG_RQ)
#define QLNXR_MSG_MR		0x100000
#define QLNXR_MSG_GSI		0x200000
#define QLNXR_MSG_MISC		0x400000
#define QLNXR_MSG_SRQ		0x800000
#define QLNXR_MSG_IWARP		0x1000000
227 #define QLNXR_ROCE_PKEY_MAX 1
228 #define QLNXR_ROCE_PKEY_TABLE_LEN 1
229 #define QLNXR_ROCE_PKEY_DEFAULT 0xffff
231 #define QLNXR_MAX_SGID 128 /* TBD - add more source gids... */
233 #define QLNXR_ENET_STATE_BIT (0)
235 #define QLNXR_MAX_MSIX (16)
239 struct qlnxr_dev *dev;
240 struct ecore_chain pbl;
241 struct ecore_sb_info *sb;
247 struct resource *irq;
251 struct qlnxr_device_attr {
252 /* Vendor specific information */
258 u64 node_guid; /* node GUID */
259 u64 sys_image_guid; /* System image GUID */
262 u8 max_sge; /* Maximum # of scatter/gather entries
263 * per Work Request supported
266 u32 max_sqe; /* Maximum number of send outstanding send work
267 * requests on any Work Queue supported
269 u32 max_rqe; /* Maximum number of receive outstanding receive
270 * work requests on any Work Queue supported
272 u8 max_qp_resp_rd_atomic_resc; /* Maximum number of RDMA Reads
273 * & atomic operation that can
274 * be outstanding per QP
277 u8 max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
278 * initiation of RDMA Read
279 * & atomic operations
281 u64 max_dev_resp_rd_atomic_resc;
284 u32 max_mr; /* Maximum # of MRs supported */
285 u64 max_mr_size; /* Size (in bytes) of largest contiguous memory
286 * block that can be registered by this device
289 u32 max_mw; /* Maximum # of memory windows supported */
291 u32 max_mr_mw_fmr_pbl;
292 u64 max_mr_mw_fmr_size;
293 u32 max_pd; /* Maximum # of protection domains supported */
296 u32 max_srq; /* Maximum number of SRQs */
297 u32 max_srq_wr; /* Maximum number of WRs per SRQ */
298 u8 max_srq_sge; /* Maximum number of SGE per WQE */
299 u8 max_stats_queues; /* Maximum number of statistics queues */
302 /* Abilty to support RNR-NAK generation */
304 #define QLNXR_ROCE_DEV_CAP_RNR_NAK_MASK 0x1
305 #define QLNXR_ROCE_DEV_CAP_RNR_NAK_SHIFT 0
306 /* Abilty to support shutdown port */
307 #define QLNXR_ROCE_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
308 #define QLNXR_ROCE_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
309 /* Abilty to support port active event */
310 #define QLNXR_ROCE_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
311 #define QLNXR_ROCE_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
312 /* Abilty to support port change event */
313 #define QLNXR_ROCE_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
314 #define QLNXR_ROCE_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
315 /* Abilty to support system image GUID */
316 #define QLNXR_ROCE_DEV_CAP_SYS_IMAGE_MASK 0x1
317 #define QLNXR_ROCE_DEV_CAP_SYS_IMAGE_SHIFT 4
318 /* Abilty to support bad P_Key counter support */
319 #define QLNXR_ROCE_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
320 #define QLNXR_ROCE_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
321 /* Abilty to support atomic operations */
322 #define QLNXR_ROCE_DEV_CAP_ATOMIC_OP_MASK 0x1
323 #define QLNXR_ROCE_DEV_CAP_ATOMIC_OP_SHIFT 6
324 #define QLNXR_ROCE_DEV_CAP_RESIZE_CQ_MASK 0x1
325 #define QLNXR_ROCE_DEV_CAP_RESIZE_CQ_SHIFT 7
326 /* Abilty to support modifying the maximum number of
327 * outstanding work requests per QP
329 #define QLNXR_ROCE_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
330 #define QLNXR_ROCE_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
332 /* Abilty to support automatic path migration */
333 #define QLNXR_ROCE_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
334 #define QLNXR_ROCE_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
335 /* Abilty to support the base memory management extensions */
336 #define QLNXR_ROCE_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
337 #define QLNXR_ROCE_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
338 #define QLNXR_ROCE_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
339 #define QLNXR_ROCE_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
340 /* Abilty to support multipile page sizes per memory region */
341 #define QLNXR_ROCE_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
342 #define QLNXR_ROCE_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
343 /* Abilty to support block list physical buffer list */
344 #define QLNXR_ROCE_DEV_CAP_BLOCK_MODE_MASK 0x1
345 #define QLNXR_ROCE_DEV_CAP_BLOCK_MODE_SHIFT 13
346 /* Abilty to support zero based virtual addresses */
347 #define QLNXR_ROCE_DEV_CAP_ZBVA_MASK 0x1
348 #define QLNXR_ROCE_DEV_CAP_ZBVA_SHIFT 14
349 /* Abilty to support local invalidate fencing */
350 #define QLNXR_ROCE_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
351 #define QLNXR_ROCE_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
352 /* Abilty to support Loopback on QP */
353 #define QLNXR_ROCE_DEV_CAP_LB_INDICATOR_MASK 0x1
354 #define QLNXR_ROCE_DEV_CAP_LB_INDICATOR_SHIFT 16
357 u32 reserved_lkey; /* Value of reserved L_key */
358 u32 bad_pkey_counter;/* Bad P_key counter support
361 struct ecore_rdma_events events;
365 struct ib_device ibdev;
367 struct ecore_dev *cdev;
369 /* Added to extend Applications Support */
370 struct pci_dev *pdev;
379 uint32_t wq_multiplier;
382 struct ecore_sb_info sb_array[QLNXR_MAX_MSIX];
383 struct qlnxr_cnq cnq_array[QLNXR_MAX_MSIX];
388 struct qlnxr_cq *gsi_sqcq;
389 struct qlnxr_cq *gsi_rqcq;
390 struct qlnxr_qp *gsi_qp;
392 /* TBD: we'll need an array of these probablly per DPI... */
393 void __iomem *db_addr;
394 uint64_t db_phys_addr;
399 enum ib_atomic_cap atomic_cap;
401 union ib_gid sgid_tbl[QLNXR_MAX_SGID];
402 struct mtx sgid_lock;
403 struct notifier_block nb_inet;
404 struct notifier_block nb_inet6;
407 struct list_head entry;
409 struct dentry *dbgfs;
411 uint8_t gsi_ll2_mac_address[ETH_ALEN];
412 uint8_t gsi_ll2_handle;
414 unsigned long enet_state;
416 struct workqueue_struct *iwarp_wq;
418 volatile uint32_t pd_count;
419 struct qlnxr_device_attr attr;
420 uint8_t user_dpm_enabled;
423 typedef struct qlnxr_dev qlnxr_dev_t;
429 struct qlnxr_ucontext *uctx;
432 struct qlnxr_ucontext {
433 struct ib_ucontext ibucontext;
434 struct qlnxr_dev *dev;
441 struct list_head mm_head;
442 struct mutex mm_list_lock;
447 struct qlnxr_dev_attr {
448 struct ib_device_attr ib_attr;
451 struct qlnxr_dma_mem {
458 struct list_head list_entry;
463 struct qlnxr_queue_info {
468 u16 entry_size; /* Size of an element in the queue */
469 u16 id; /* qid, where to ring the doorbell. */
475 struct qlnxr_queue_info q;
478 struct qlnxr_dev *dev;
483 struct qlnxr_queue_info sq;
484 struct qlnxr_queue_info cq;
489 u16 auto_speeds_supported;
490 u16 fixed_speeds_supported;
496 struct rdma_pwm_val32_data data;
502 QLNXR_CQ_TYPE_KERNEL,
506 struct qlnxr_pbl_info {
515 struct ib_umem *umem;
516 struct qlnxr_pbl_info pbl_info;
517 struct qlnxr_pbl *pbl_tbl;
523 struct ib_cq ibcq; /* must be first */
525 enum qlnxr_cq_type cq_type;
529 /* relevant to cqs created from kernel space only (ULPs) */
532 struct ecore_chain pbl;
534 void __iomem *db_addr; /* db address for cons update*/
538 union rdma_cqe *latest_cqe;
539 union rdma_cqe *toggle_cqe;
541 /* TODO: remove since it is redundant with 32 bit chains */
544 /* relevant to cqs created from user space only (applications) */
545 struct qlnxr_userq q;
547 /* destroy-IRQ handler race prevention */
555 struct ib_ah_attr attr;
559 struct rdma_pwm_val16_data data;
563 struct qlnxr_qp_hwq_info {
565 struct ecore_chain pbl;
570 u16 prod; /* WQE prod index for SW ring */
571 u16 cons; /* WQE cons index for SW ring */
573 u16 gsi_cons; /* filled in by GSI implementation */
577 void __iomem *db; /* Doorbell address */
578 union db_prod32 db_data; /* Doorbell data */
580 /* Required for iwarp_only */
581 void __iomem *iwarp_db2; /* Doorbell address */
582 union db_prod32 iwarp_db2_data; /* Doorbell data */
/*
 * Advance a SW ring index on a qlnxr_qp_hwq_info, wrapping against the
 * chain capacity.
 * NOTE(review): this masks with the capacity itself (not capacity - 1 and
 * not a modulo); presumably ecore_chain_get_capacity() returns 2^n - 1 for
 * these rings — confirm against the ecore chain implementation.
 */
#define QLNXR_INC_SW_IDX(p_info, index)				\
	do {							\
		p_info->index = (p_info->index + 1) &		\
			ecore_chain_get_capacity(p_info->pbl);	\
	} while (0)
591 struct qlnxr_srq_hwq_info {
594 struct ecore_chain pbl;
596 u32 wqe_prod; /* WQE prod index in HW ring */
597 u32 sge_prod; /* SGE prod index in HW ring */
598 u32 wr_prod_cnt; /* wr producer count */
599 u32 wr_cons_cnt; /* wr consumer count */
602 u32 *virt_prod_pair_addr; /* producer pair virtual address */
603 dma_addr_t phy_prod_pair_addr; /* producer pair physical address */
608 struct qlnxr_dev *dev;
609 /* relevant to cqs created from user space only (applications) */
610 struct qlnxr_userq usrq;
611 struct qlnxr_srq_hwq_info hw_srq;
612 struct ib_umem *prod_umem;
614 /* lock to protect srq recv post */
618 enum qlnxr_qp_err_bitmap {
619 QLNXR_QP_ERR_SQ_FULL = 1 << 0,
620 QLNXR_QP_ERR_RQ_FULL = 1 << 1,
621 QLNXR_QP_ERR_BAD_SR = 1 << 2,
622 QLNXR_QP_ERR_BAD_RR = 1 << 3,
623 QLNXR_QP_ERR_SQ_PBL_FULL = 1 << 4,
624 QLNXR_QP_ERR_RQ_PBL_FULL = 1 << 5,
628 struct qlnxr_pbl *pbl_table;
629 struct qlnxr_pbl_info pbl_info;
630 struct list_head free_pbl_list;
631 struct list_head inuse_pbl_list;
633 u32 completed_handled;
636 #if __FreeBSD_version < 1102000
637 #define DEFINE_IB_FAST_REG
639 #define DEFINE_ALLOC_MR
642 #ifdef DEFINE_IB_FAST_REG
643 struct qlnxr_fast_reg_page_list {
644 struct ib_fast_reg_page_list ibfrpl;
645 struct qlnxr_dev *dev;
650 struct ib_qp ibqp; /* must be first */
651 struct qlnxr_dev *dev;
652 struct qlnxr_iw_ep *ep;
653 struct qlnxr_qp_hwq_info sq;
654 struct qlnxr_qp_hwq_info rq;
658 #if __FreeBSD_version >= 1100000
659 spinlock_t q_lock ____cacheline_aligned;
664 struct qlnxr_cq *sq_cq;
665 struct qlnxr_cq *rq_cq;
666 struct qlnxr_srq *srq;
667 enum ecore_roce_qp_state state; /* QP state */
670 enum ib_qp_type qp_type;
671 struct ecore_rdma_qp *ecore_qp;
680 u32 sig; /* unique siganture to identify valid QP */
682 /* relevant to qps created from kernel space only (ULPs) */
690 enum ib_wc_opcode opcode;
694 dma_addr_t icrc_mapping;
696 #ifdef DEFINE_IB_FAST_REG
697 struct qlnxr_fast_reg_page_list *frmr;
705 struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
714 /* relevant to qps created from user space only (applications) */
715 struct qlnxr_userq usq;
716 struct qlnxr_userq urq;
731 struct ib_umem *umem;
733 struct ecore_rdma_register_tid_in_params hw_mr;
734 enum qlnxr_mr_type type;
736 struct qlnxr_dev *dev;
742 u64 *iova_start; /* valid only for kernel_mr */
751 struct list_head entry;
754 struct qlnxr_iw_listener {
755 struct qlnxr_dev *dev;
756 struct iw_cm_id *cm_id;
762 struct qlnxr_dev *dev;
763 struct iw_cm_id *cm_id;
770 qlnxr_inc_sw_cons(struct qlnxr_qp_hwq_info *info)
772 info->cons = (info->cons + 1) % info->max_wr;
777 qlnxr_inc_sw_prod(struct qlnxr_qp_hwq_info *info)
779 info->prod = (info->prod + 1) % info->max_wr;
782 static inline struct qlnxr_dev *
783 get_qlnxr_dev(struct ib_device *ibdev)
785 return container_of(ibdev, struct qlnxr_dev, ibdev);
788 static inline struct qlnxr_ucontext *
789 get_qlnxr_ucontext(struct ib_ucontext *ibucontext)
791 return container_of(ibucontext, struct qlnxr_ucontext, ibucontext);
794 static inline struct qlnxr_pd *
795 get_qlnxr_pd(struct ib_pd *ibpd)
797 return container_of(ibpd, struct qlnxr_pd, ibpd);
800 static inline struct qlnxr_cq *
801 get_qlnxr_cq(struct ib_cq *ibcq)
803 return container_of(ibcq, struct qlnxr_cq, ibcq);
806 static inline struct qlnxr_qp *
807 get_qlnxr_qp(struct ib_qp *ibqp)
809 return container_of(ibqp, struct qlnxr_qp, ibqp);
812 static inline struct qlnxr_mr *
813 get_qlnxr_mr(struct ib_mr *ibmr)
815 return container_of(ibmr, struct qlnxr_mr, ibmr);
818 static inline struct qlnxr_ah *
819 get_qlnxr_ah(struct ib_ah *ibah)
821 return container_of(ibah, struct qlnxr_ah, ibah);
824 static inline struct qlnxr_srq *
825 get_qlnxr_srq(struct ib_srq *ibsrq)
827 return container_of(ibsrq, struct qlnxr_srq, ibsrq);
830 static inline bool qlnxr_qp_has_srq(struct qlnxr_qp *qp)
835 static inline bool qlnxr_qp_has_sq(struct qlnxr_qp *qp)
837 if (qp->qp_type == IB_QPT_GSI)
843 static inline bool qlnxr_qp_has_rq(struct qlnxr_qp *qp)
845 if (qp->qp_type == IB_QPT_GSI || qlnxr_qp_has_srq(qp))
852 #ifdef DEFINE_IB_FAST_REG
853 static inline struct qlnxr_fast_reg_page_list *get_qlnxr_frmr_list(
854 struct ib_fast_reg_page_list *ifrpl)
856 return container_of(ifrpl, struct qlnxr_fast_reg_page_list, ibfrpl);
/*
 * OR 'flag' into 'value' at the bit position named by name##_SHIFT.
 * Statement-like macro wrapped in do/while(0) so it composes safely
 * with if/else.
 */
#define SET_FIELD2(value, name, flag)				\
	do {							\
		(value) |= ((flag) << (name ## _SHIFT));	\
	} while (0)
865 #define QLNXR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
866 RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
867 #define QLNXR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
868 RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
869 #define QLNXR_RESP_INV (RDMA_CQE_RESPONDER_INV_FLG_MASK << \
870 RDMA_CQE_RESPONDER_INV_FLG_SHIFT)
872 #define QLNXR_RESP_RDMA_IMM (QLNXR_RESP_IMM | QLNXR_RESP_RDMA)
875 qlnxr_get_dmac(struct qlnxr_dev *dev, struct ib_ah_attr *ah_attr, u8 *mac_addr)
877 #ifdef DEFINE_NO_IP_BASED_GIDS
878 u8 *guid = &ah_attr->grh.dgid.raw[8]; /* GID's 64 MSBs are the GUID */
880 union ib_gid zero_sgid = { { 0 } };
883 if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
884 memset(mac_addr, 0x00, ETH_ALEN);
888 memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
890 #ifdef DEFINE_NO_IP_BASED_GIDS
891 /* get the MAC address from the GUID i.e. EUI-64 to MAC address */
892 mac_addr[0] = guid[0] ^ 2; /* toggle the local/universal bit to local */
893 mac_addr[1] = guid[1];
894 mac_addr[2] = guid[2];
895 mac_addr[3] = guid[5];
896 mac_addr[4] = guid[6];
897 mac_addr[5] = guid[7];
899 memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
904 extern int qlnx_rdma_ll2_set_mac_filter(void *rdma_ctx, uint8_t *old_mac_address,
905 uint8_t *new_mac_address);
908 #define QLNXR_ROCE_PKEY_MAX 1
909 #define QLNXR_ROCE_PKEY_TABLE_LEN 1
910 #define QLNXR_ROCE_PKEY_DEFAULT 0xffff
912 #if __FreeBSD_version < 1100000
913 #define DEFINE_IB_AH_ATTR_WITH_DMAC (0)
914 #define DEFINE_IB_UMEM_WITH_CHUNK (1)
916 #define DEFINE_IB_AH_ATTR_WITH_DMAC (1)
919 #define QLNX_IS_IWARP(rdev) IS_IWARP(ECORE_LEADING_HWFN(rdev->cdev))
920 #define QLNX_IS_ROCE(rdev) IS_ROCE(ECORE_LEADING_HWFN(rdev->cdev))
922 #define MAX_RXMIT_CONNS 16
924 #endif /* #ifndef __QLNX_DEF_H_ */