2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <dev/mlx5/driver.h>
/* Sentinel lkey value that the HW treats as invalid. */
#define MLX5_INVALID_LKEY 0x100
/* Signature WQEs span 5 send-queue basic blocks (5 * 64 bytes, see MLX5_SEND_WQE_BB). */
#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
/* Size in bytes of one T10-DIF tuple (2B guard + 2B app tag + 4B ref tag). */
#define MLX5_DIF_SIZE 8
#define MLX5_STRIDE_BLOCK_OP 0x400
/* Masks selecting which T10-DIF fields are copied: guard, application tag, reference tag. */
#define MLX5_CPY_GRD_MASK 0xc0
#define MLX5_CPY_APP_MASK 0x30
#define MLX5_CPY_REF_MASK 0x0f
/* BSF (byte-stream framing) control flags for signature offload. */
#define MLX5_BSF_INC_REFTAG (1 << 6)
#define MLX5_BSF_INL_VALID (1 << 15)
#define MLX5_BSF_REFRESH_DIF (1 << 14)
#define MLX5_BSF_REPEAT_BLOCK (1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE 0x1
#define MLX5_BSF_APPREF_ESCAPE 0x2
/* One WQE data segment (DS) is 16 bytes (matches MLX5_SEND_WQE_DS). */
#define MLX5_WQE_DS_UNITS 16
/*
 * Optional-parameter bits for MODIFY_QP commands: each bit flags one
 * optional qpc field as valid (cf. the opt_param_mask argument of
 * mlx5_core_qp_modify()).
 */
MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
MLX5_QP_OPTPAR_RRE = 1 << 1,
MLX5_QP_OPTPAR_RAE = 1 << 2,
MLX5_QP_OPTPAR_RWE = 1 << 3,
MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4,
MLX5_QP_OPTPAR_Q_KEY = 1 << 5,
MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
MLX5_QP_OPTPAR_SRA_MAX = 1 << 8,
MLX5_QP_OPTPAR_RRA_MAX = 1 << 9,
MLX5_QP_OPTPAR_PM_STATE = 1 << 10,
/* bit 11 intentionally unused here */
MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
/* bit 15 intentionally unused here */
MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
/* bit 17 intentionally unused here */
MLX5_QP_OPTPAR_SRQN = 1 << 18,
MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
MLX5_QP_OPTPAR_DC_HS = 1 << 20,
MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
/*
 * QP states as encoded in the hardware QP context.
 * Note the encoding gap: 8 is unused, SUSPENDED is 9.
 */
MLX5_QP_STATE_RST = 0,
MLX5_QP_STATE_INIT = 1,
MLX5_QP_STATE_RTR = 2,
MLX5_QP_STATE_RTS = 3,
MLX5_QP_STATE_SQER = 4,
MLX5_QP_STATE_SQD = 5,
MLX5_QP_STATE_ERR = 6,
MLX5_QP_STATE_SQ_DRAINING = 7,
MLX5_QP_STATE_SUSPENDED = 9,
86 MLX5_SQ_STATE_NA = MLX5_SQC_STATE_ERR + 1,
87 MLX5_SQ_NUM_STATE = MLX5_SQ_STATE_NA + 1,
88 MLX5_RQ_STATE_NA = MLX5_RQC_STATE_ERR + 1,
89 MLX5_RQ_NUM_STATE = MLX5_RQ_STATE_NA + 1,
100 MLX5_QP_ST_QP0 = 0x7,
101 MLX5_QP_ST_QP1 = 0x8,
102 MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
103 MLX5_QP_ST_RAW_IPV6 = 0xa,
104 MLX5_QP_ST_SNIFFER = 0xb,
105 MLX5_QP_ST_SYNC_UMR = 0xe,
106 MLX5_QP_ST_PTP_1588 = 0xd,
107 MLX5_QP_ST_REG_UMR = 0xc,
108 MLX5_QP_ST_SW_CNAK = 0x10,
113 MLX5_NON_ZERO_RQ = 0x0,
116 MLX5_ZERO_LEN_RQ = 0x3
121 MLX5_QP_BIT_SRE = 1 << 15,
122 MLX5_QP_BIT_SWE = 1 << 14,
123 MLX5_QP_BIT_SAE = 1 << 13,
125 MLX5_QP_BIT_RRE = 1 << 15,
126 MLX5_QP_BIT_RWE = 1 << 14,
127 MLX5_QP_BIT_RAE = 1 << 13,
128 MLX5_QP_BIT_RIC = 1 << 4,
129 MLX5_QP_BIT_COLL_SYNC_RQ = 1 << 2,
130 MLX5_QP_BIT_COLL_SYNC_SQ = 1 << 1,
131 MLX5_QP_BIT_COLL_MASTER = 1 << 0
135 MLX5_DCT_BIT_RRE = 1 << 19,
136 MLX5_DCT_BIT_RWE = 1 << 18,
137 MLX5_DCT_BIT_RAE = 1 << 17,
141 MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
142 MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
143 MLX5_WQE_CTRL_SOLICITED = 1 << 1,
147 MLX5_SEND_WQE_DS = 16,
148 MLX5_SEND_WQE_BB = 64,
/* Number of 16-byte data segments per 64-byte WQE basic block (64 / 16 = 4). */
#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
154 MLX5_SEND_WQE_MAX_WQEBBS = 16,
158 MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
159 MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
160 MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
161 MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
162 MLX5_WQE_FMR_PERM_ATOMIC = 1U << 31
166 MLX5_FENCE_MODE_NONE = 0 << 5,
167 MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
168 MLX5_FENCE_MODE_FENCE = 2 << 5,
169 MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
170 MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
179 MLX5_FLAGS_INLINE = 1<<7,
180 MLX5_FLAGS_CHECK_FREE = 1<<5,
183 struct mlx5_wqe_fmr_seg {
194 struct mlx5_wqe_ctrl_seg {
195 __be32 opmod_idx_opcode;
/* Mask for the DS (data-segment count) field in the WQE control segment (low 6 bits). */
#define MLX5_WQE_CTRL_DS_MASK 0x3f
206 MLX5_MLX_FLAG_MASK_VL15 = 0x40,
207 MLX5_MLX_FLAG_MASK_SLR = 0x20,
208 MLX5_MLX_FLAG_MASK_ICRC = 0x8,
209 MLX5_MLX_FLAG_MASK_FL = 4
212 struct mlx5_mlx_seg {
221 MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
222 MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
223 MLX5_ETH_WQE_L3_CSUM = 1 << 6,
224 MLX5_ETH_WQE_L4_CSUM = 1 << 7,
228 MLX5_ETH_WQE_SWP_INNER_L3_TYPE = 1 << 0,
229 MLX5_ETH_WQE_SWP_INNER_L4_TYPE = 1 << 1,
230 MLX5_ETH_WQE_SWP_OUTER_L3_TYPE = 1 << 4,
231 MLX5_ETH_WQE_SWP_OUTER_L4_TYPE = 1 << 5,
234 struct mlx5_wqe_eth_seg {
235 u8 swp_outer_l4_offset;
236 u8 swp_outer_l3_offset;
237 u8 swp_inner_l4_offset;
238 u8 swp_inner_l3_offset;
245 __be16 inline_hdr_sz;
246 u8 inline_hdr_start[2];
255 struct mlx5_wqe_xrc_seg {
260 struct mlx5_wqe_masked_atomic_seg {
263 __be64 swap_add_mask;
290 struct mlx5_wqe_datagram_seg {
294 struct mlx5_wqe_raddr_seg {
300 struct mlx5_wqe_atomic_seg {
305 struct mlx5_wqe_data_seg {
311 struct mlx5_wqe_umr_ctrl_seg {
314 __be16 klm_octowords;
315 __be16 bsf_octowords;
320 struct mlx5_seg_set_psv {
324 __be32 transient_sig;
328 struct mlx5_seg_get_psv {
336 struct mlx5_seg_check_psv {
338 __be16 err_coalescing_op;
342 __be16 xport_err_mask;
350 struct mlx5_rwqe_sig {
356 struct mlx5_wqe_signature_seg {
362 struct mlx5_wqe_inline_seg {
371 struct mlx5_bsf_inl {
378 u8 dif_inc_ref_guard_check;
379 __be16 dif_app_bitmask_check;
383 struct mlx5_bsf_basic {
395 __be32 raw_data_size;
399 struct mlx5_bsf_ext {
400 __be32 t_init_gen_pro_size;
401 __be32 rsvd_epi_size;
405 struct mlx5_bsf_inl w_inl;
406 struct mlx5_bsf_inl m_inl;
415 struct mlx5_stride_block_entry {
422 struct mlx5_stride_block_ctrl_seg {
423 __be32 bcount_per_cycle;
430 enum mlx5_pagefault_flags {
431 MLX5_PFAULT_REQUESTOR = 1 << 0,
432 MLX5_PFAULT_WRITE = 1 << 1,
433 MLX5_PFAULT_RDMA = 1 << 2,
436 /* Contains the details of a pagefault. */
437 struct mlx5_pagefault {
440 enum mlx5_pagefault_flags flags;
442 /* Initiator or send message responder pagefault details. */
444 /* Received packet size, only valid for responders. */
447 * WQE index. Refers to either the send queue or
448 * receive queue, according to event_subtype.
452 /* RDMA responder pagefault details */
456 * Received packet size, minimal size page fault
457 * resolution required for forward progress.
466 struct mlx5_core_qp {
467 struct mlx5_core_rsc_common common; /* must be first */
468 void (*event) (struct mlx5_core_qp *, int);
470 struct mlx5_rsc_debug *dbg;
474 struct mlx5_qp_path {
485 __be32 tclass_flowlabel;
498 struct mlx5_qp_context {
504 __be32 qp_counter_set_usr_page;
506 __be32 log_pg_sz_remote_qpn;
507 struct mlx5_qp_path pri_path;
508 struct mlx5_qp_path alt_path;
511 __be32 next_send_psn;
515 __be32 last_acked_psn;
518 __be32 rnr_nextrecvpsn;
525 __be16 hw_sq_wqe_counter;
526 __be16 sw_sq_wqe_counter;
527 __be16 hw_rcyclic_byte_counter;
528 __be16 hw_rq_counter;
529 __be16 sw_rcyclic_byte_counter;
530 __be16 sw_rq_counter;
535 __be64 dc_access_key;
539 struct mlx5_dct_context {
550 __be32 tclass_flow_label;
559 __be32 access_violations;
/*
 * Look up a mlx5_core_qp by QP number in the device's QP radix tree;
 * returns NULL if no QP with that number is registered.
 * NOTE(review): callers presumably serialize against table updates
 * (e.g. via the QP table lock) — confirm at call sites.
 */
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
/*
 * Look up a mlx5_core_mr by memory key in the device's MR radix tree;
 * returns NULL if no MR is registered under that key.
 * NOTE(review): same locking caveat as __mlx5_qp_lookup — confirm at call sites.
 */
static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
return radix_tree_lookup(&dev->priv.mr_table.tree, key);
573 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
574 struct mlx5_core_qp *qp,
/*
 * Transition a QP's state. 'opcode' selects the MODIFY_QP command variant;
 * 'opt_param_mask' (MLX5_QP_OPTPAR_* bits) marks which optional qpc fields
 * are valid.
 */
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
u32 opt_param_mask, void *qpc,
struct mlx5_core_qp *qp);
/* Destroy a previously created QP. */
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
/* Query QP state/context into 'out' (caller-provided buffer of 'outlen' bytes). */
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen);
/* Query DCT context into 'out' (caller-provided buffer of 'outlen' bytes). */
int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
u32 *out, int outlen);
/* Arm a DC target for event notification. */
int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct);
/* Allocate / release an XRC domain; on success *xrcdn holds the new domain number. */
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
590 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
591 struct mlx5_core_dct *dct,
/* Destroy a previously created DC target. */
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *dct);
/*
 * Create/destroy RQs and SQs. NOTE(review): "tracked" presumably means the
 * queue object is registered in the QP table so async events can be routed
 * to it — confirm against the implementation.
 */
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *rq);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *rq);
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *sq);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *sq);
/* Set up / tear down the per-device QP lookup table (see __mlx5_qp_lookup). */
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
/* Register / unregister a QP with the debugfs-style QP debugging facility. */
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
608 static inline const char *mlx5_qp_type_str(int type)
611 case MLX5_QP_ST_RC: return "RC";
612 case MLX5_QP_ST_UC: return "C";
613 case MLX5_QP_ST_UD: return "UD";
614 case MLX5_QP_ST_XRC: return "XRC";
615 case MLX5_QP_ST_MLX: return "MLX";
616 case MLX5_QP_ST_DCI: return "DCI";
617 case MLX5_QP_ST_QP0: return "QP0";
618 case MLX5_QP_ST_QP1: return "QP1";
619 case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
620 case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
621 case MLX5_QP_ST_SNIFFER: return "SNIFFER";
622 case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
623 case MLX5_QP_ST_PTP_1588: return "PTP_1588";
624 case MLX5_QP_ST_REG_UMR: return "REG_UMR";
625 case MLX5_QP_ST_SW_CNAK: return "DC_CNAK";
626 default: return "Invalid transport type";
630 static inline const char *mlx5_qp_state_str(int state)
633 case MLX5_QP_STATE_RST:
635 case MLX5_QP_STATE_INIT:
637 case MLX5_QP_STATE_RTR:
639 case MLX5_QP_STATE_RTS:
641 case MLX5_QP_STATE_SQER:
643 case MLX5_QP_STATE_SQD:
645 case MLX5_QP_STATE_ERR:
647 case MLX5_QP_STATE_SQ_DRAINING:
648 return "SQ_DRAINING";
649 case MLX5_QP_STATE_SUSPENDED:
651 default: return "Invalid QP state";
655 #endif /* MLX5_QP_H */