/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
31 #include <dev/mlx5/device.h>
32 #include <dev/mlx5/driver.h>
33 #include <dev/mlx5/mlx5_ifc.h>
/*
 * WQE and T10-DIF signature (BSF) related constants.
 * NOTE(review): values restored verbatim from the damaged extraction; only
 * the stray line-number prefixes were removed. Semantics of the individual
 * masks come from the device PRM — confirm against the firmware spec.
 */
#define MLX5_INVALID_LKEY	0x100
#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
#define MLX5_DIF_SIZE		8
#define MLX5_STRIDE_BLOCK_OP	0x400
#define MLX5_CPY_GRD_MASK	0xc0
#define MLX5_CPY_APP_MASK	0x30
#define MLX5_CPY_REF_MASK	0x0f
#define MLX5_BSF_INC_REFTAG	(1 << 6)
#define MLX5_BSF_INL_VALID	(1 << 15)
#define MLX5_BSF_REFRESH_DIF	(1 << 14)
#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE	0x1
#define MLX5_BSF_APPREF_ESCAPE	0x2
/*
 * Optional-parameter bits for QP modify commands.
 * NOTE(review): presumably ORed into opt_param_mask of the modify-QP
 * mailbox — confirm against the firmware spec. The `enum {` / `};`
 * wrapper was restored after extraction damage; member values are
 * unchanged (note bits 11, 15 and 17 are intentionally unused here).
 */
enum {
	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
	MLX5_QP_OPTPAR_RRE			= 1 << 1,
	MLX5_QP_OPTPAR_RAE			= 1 << 2,
	MLX5_QP_OPTPAR_RWE			= 1 << 3,
	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5,
	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8,
	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9,
	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10,
	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16,
	MLX5_QP_OPTPAR_SRQN			= 1 << 18,
	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19,
	MLX5_QP_OPTPAR_DC_HS			= 1 << 20,
	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21,
};
/*
 * QP hardware state encodings.
 * NOTE(review): `enum {` / `};` wrapper restored after extraction damage;
 * the value 8 is skipped in the original numbering (SUSPENDED is 9).
 */
enum {
	MLX5_QP_STATE_RST		= 0,
	MLX5_QP_STATE_INIT		= 1,
	MLX5_QP_STATE_RTR		= 2,
	MLX5_QP_STATE_RTS		= 3,
	MLX5_QP_STATE_SQER		= 4,
	MLX5_QP_STATE_SQD		= 5,
	MLX5_QP_STATE_ERR		= 6,
	MLX5_QP_STATE_SQ_DRAINING	= 7,
	MLX5_QP_STATE_SUSPENDED		= 9,
};
94 MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
95 MLX5_QP_ST_RAW_IPV6 = 0xa,
96 MLX5_QP_ST_SNIFFER = 0xb,
97 MLX5_QP_ST_SYNC_UMR = 0xe,
98 MLX5_QP_ST_PTP_1588 = 0xd,
99 MLX5_QP_ST_REG_UMR = 0xc,
100 MLX5_QP_ST_SW_CNAK = 0x10,
/*
 * RQ type selector values; the type field occupies bits 24+ (hence the
 * `<< 24`), presumably within the QP context flags word — confirm.
 * `enum {` / `};` wrapper restored after extraction damage.
 */
enum {
	MLX5_NON_ZERO_RQ	= 0 << 24,
	MLX5_SRQ_RQ		= 1 << 24,
	MLX5_CRQ_RQ		= 2 << 24,
	MLX5_ZERO_LEN_RQ	= 3 << 24
};
/*
 * QP context enable/flag bits.
 * The S* and R* groups reuse the same bit positions (15/14/13), so they
 * must target different context fields (send vs. responder side) —
 * NOTE(review): confirm field placement against the firmware spec.
 * `enum {` / `};` wrapper restored after extraction damage.
 */
enum {
	MLX5_QP_BIT_SRE			= 1 << 15,
	MLX5_QP_BIT_SWE			= 1 << 14,
	MLX5_QP_BIT_SAE			= 1 << 13,

	MLX5_QP_BIT_RRE			= 1 << 15,
	MLX5_QP_BIT_RWE			= 1 << 14,
	MLX5_QP_BIT_RAE			= 1 << 13,
	MLX5_QP_BIT_RIC			= 1 << 4,
	MLX5_QP_BIT_COLL_SYNC_RQ	= 1 << 2,
	MLX5_QP_BIT_COLL_SYNC_SQ	= 1 << 1,
	MLX5_QP_BIT_COLL_MASTER		= 1 << 0
};
/*
 * DCT context access-enable bits (remote read/write/atomic).
 * `enum {` / `};` wrapper restored after extraction damage.
 */
enum {
	MLX5_DCT_BIT_RRE	= 1 << 19,
	MLX5_DCT_BIT_RWE	= 1 << 18,
	MLX5_DCT_BIT_RAE	= 1 << 17,
};
/*
 * WQE control-segment completion/solicit flags.
 * `enum {` / `};` wrapper restored after extraction damage.
 */
enum {
	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE	= 3 << 2,
	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
};
/*
 * Send WQE geometry: 16-byte data segment, 64-byte basic block.
 * `enum {` / `};` wrapper restored after extraction damage.
 */
enum {
	MLX5_SEND_WQE_DS	= 16,
	MLX5_SEND_WQE_BB	= 64,
};
/* Number of 16-byte data segments that fit in one 64-byte basic block (4). */
#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
/*
 * Maximum basic blocks per send WQE.
 * `enum {` / `};` wrapper restored after extraction damage.
 */
enum {
	MLX5_SEND_WQE_MAX_WQEBBS	= 16,
};
/*
 * FMR access-permission bits for the WQE FMR segment.
 * NOTE(review): `1 << 31` sets the sign bit of int (kept byte-identical to
 * the original); compare/assign via an unsigned type at use sites.
 * `enum {` / `};` wrapper restored after extraction damage.
 */
enum {
	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29,
	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30,
	MLX5_WQE_FMR_PERM_ATOMIC	= 1 << 31
};
/*
 * Fence-mode encodings; the mode value occupies bits 5+ of its field
 * (hence the `<< 5`). `enum {` / `};` wrapper restored after extraction
 * damage; only the values below appear in this extraction.
 */
enum {
	MLX5_FENCE_MODE_NONE			= 0 << 5,
	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
};
/*
 * Misc. QP context flag bits.
 * NOTE(review): `1 << 31` sets the sign bit of int (kept byte-identical to
 * the original). `enum {` / `};` wrapper restored after extraction damage.
 */
enum {
	MLX5_QP_DRAIN_SIGERR	= 1 << 26,
	MLX5_QP_LAT_SENSITIVE	= 1 << 28,
	MLX5_QP_BLOCK_MCAST	= 1 << 30,
	MLX5_QP_ENABLE_SIG	= 1 << 31,
};
/*
 * WQE flag bits (inline data present, free-check).
 * NOTE(review): `enum {` / `};` wrapper restored after extraction damage;
 * verify no sibling members were lost from this enum in the original.
 */
enum {
	MLX5_FLAGS_INLINE	= 1 << 7,
	MLX5_FLAGS_CHECK_FREE	= 1 << 5,
};
181 struct mlx5_wqe_fmr_seg {
192 struct mlx5_wqe_ctrl_seg {
193 __be32 opmod_idx_opcode;
/*
 * Flag masks for the MLX transport segment (VL15, SLR, ICRC, FL).
 * `enum {` / `};` wrapper restored after extraction damage.
 */
enum {
	MLX5_MLX_FLAG_MASK_VL15	= 0x40,
	MLX5_MLX_FLAG_MASK_SLR	= 0x20,
	MLX5_MLX_FLAG_MASK_ICRC	= 0x8,
	MLX5_MLX_FLAG_MASK_FL	= 4
};
208 struct mlx5_mlx_seg {
/*
 * Ethernet WQE checksum-offload flag bits (inner/outer L3/L4).
 * `enum {` / `};` wrapper restored after extraction damage.
 */
enum {
	MLX5_ETH_WQE_L3_INNER_CSUM	= 1 << 4,
	MLX5_ETH_WQE_L4_INNER_CSUM	= 1 << 5,
	MLX5_ETH_WQE_L3_CSUM		= 1 << 6,
	MLX5_ETH_WQE_L4_CSUM		= 1 << 7,
};
223 struct mlx5_wqe_eth_seg {
229 __be16 inline_hdr_sz;
230 u8 inline_hdr_start[2];
233 struct mlx5_wqe_xrc_seg {
238 struct mlx5_wqe_masked_atomic_seg {
241 __be64 swap_add_mask;
268 struct mlx5_wqe_datagram_seg {
272 struct mlx5_wqe_raddr_seg {
278 struct mlx5_wqe_atomic_seg {
283 struct mlx5_wqe_data_seg {
289 struct mlx5_wqe_umr_ctrl_seg {
292 __be16 klm_octowords;
293 __be16 bsf_octowords;
298 struct mlx5_seg_set_psv {
302 __be32 transient_sig;
306 struct mlx5_seg_get_psv {
314 struct mlx5_seg_check_psv {
316 __be16 err_coalescing_op;
320 __be16 xport_err_mask;
328 struct mlx5_rwqe_sig {
334 struct mlx5_wqe_signature_seg {
340 struct mlx5_wqe_inline_seg {
349 struct mlx5_bsf_inl {
356 u8 dif_inc_ref_guard_check;
357 __be16 dif_app_bitmask_check;
361 struct mlx5_bsf_basic {
373 __be32 raw_data_size;
377 struct mlx5_bsf_ext {
378 __be32 t_init_gen_pro_size;
379 __be32 rsvd_epi_size;
383 struct mlx5_bsf_inl w_inl;
384 struct mlx5_bsf_inl m_inl;
393 struct mlx5_stride_block_entry {
400 struct mlx5_stride_block_ctrl_seg {
401 __be32 bcount_per_cycle;
408 struct mlx5_core_qp {
409 struct mlx5_core_rsc_common common; /* must be first */
410 void (*event) (struct mlx5_core_qp *, int);
412 struct mlx5_rsc_debug *dbg;
416 struct mlx5_qp_path {
427 __be32 tclass_flowlabel;
440 struct mlx5_qp_context {
446 __be32 qp_counter_set_usr_page;
448 __be32 log_pg_sz_remote_qpn;
449 struct mlx5_qp_path pri_path;
450 struct mlx5_qp_path alt_path;
453 __be32 next_send_psn;
456 __be32 last_acked_psn;
459 __be32 rnr_nextrecvpsn;
466 __be16 hw_sq_wqe_counter;
467 __be16 sw_sq_wqe_counter;
468 __be16 hw_rcyclic_byte_counter;
469 __be16 hw_rq_counter;
470 __be16 sw_rcyclic_byte_counter;
471 __be16 sw_rq_counter;
476 __be64 dc_access_key;
480 struct mlx5_create_qp_mbox_in {
481 struct mlx5_inbox_hdr hdr;
484 __be32 opt_param_mask;
486 struct mlx5_qp_context ctx;
491 struct mlx5_dct_context {
502 __be32 tclass_flow_label;
511 __be32 access_violations;
515 struct mlx5_create_dct_mbox_in {
516 struct mlx5_inbox_hdr hdr;
518 struct mlx5_dct_context context;
522 struct mlx5_create_dct_mbox_out {
523 struct mlx5_outbox_hdr hdr;
528 struct mlx5_destroy_dct_mbox_in {
529 struct mlx5_inbox_hdr hdr;
534 struct mlx5_destroy_dct_mbox_out {
535 struct mlx5_outbox_hdr hdr;
539 struct mlx5_drain_dct_mbox_in {
540 struct mlx5_inbox_hdr hdr;
545 struct mlx5_drain_dct_mbox_out {
546 struct mlx5_outbox_hdr hdr;
550 struct mlx5_create_qp_mbox_out {
551 struct mlx5_outbox_hdr hdr;
556 struct mlx5_destroy_qp_mbox_in {
557 struct mlx5_inbox_hdr hdr;
562 struct mlx5_destroy_qp_mbox_out {
563 struct mlx5_outbox_hdr hdr;
567 struct mlx5_modify_qp_mbox_in {
568 struct mlx5_inbox_hdr hdr;
573 struct mlx5_qp_context ctx;
577 struct mlx5_modify_qp_mbox_out {
578 struct mlx5_outbox_hdr hdr;
582 struct mlx5_query_qp_mbox_in {
583 struct mlx5_inbox_hdr hdr;
588 struct mlx5_query_qp_mbox_out {
589 struct mlx5_outbox_hdr hdr;
593 struct mlx5_qp_context ctx;
598 struct mlx5_query_dct_mbox_in {
599 struct mlx5_inbox_hdr hdr;
604 struct mlx5_query_dct_mbox_out {
605 struct mlx5_outbox_hdr hdr;
607 struct mlx5_dct_context ctx;
611 struct mlx5_arm_dct_mbox_in {
612 struct mlx5_inbox_hdr hdr;
617 struct mlx5_arm_dct_mbox_out {
618 struct mlx5_outbox_hdr hdr;
622 struct mlx5_conf_sqp_mbox_in {
623 struct mlx5_inbox_hdr hdr;
629 struct mlx5_conf_sqp_mbox_out {
630 struct mlx5_outbox_hdr hdr;
634 static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
636 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
639 static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
641 return radix_tree_lookup(&dev->priv.mr_table.tree, key);
644 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
645 struct mlx5_core_qp *qp,
646 struct mlx5_create_qp_mbox_in *in,
648 int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
649 struct mlx5_modify_qp_mbox_in *in, int sqd_event,
650 struct mlx5_core_qp *qp);
651 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
652 struct mlx5_core_qp *qp);
653 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
654 struct mlx5_query_qp_mbox_out *out, int outlen);
655 int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
656 struct mlx5_query_dct_mbox_out *out);
657 int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct);
659 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
660 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
661 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
662 struct mlx5_core_dct *dct,
663 struct mlx5_create_dct_mbox_in *in);
664 int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
665 struct mlx5_core_dct *dct);
666 int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
667 struct mlx5_core_qp *rq);
668 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
669 struct mlx5_core_qp *rq);
670 int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
671 struct mlx5_core_qp *sq);
672 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
673 struct mlx5_core_qp *sq);
674 void mlx5_init_qp_table(struct mlx5_core_dev *dev);
675 void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
676 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
677 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
679 static inline const char *mlx5_qp_type_str(int type)
682 case MLX5_QP_ST_RC: return "RC";
683 case MLX5_QP_ST_UC: return "C";
684 case MLX5_QP_ST_UD: return "UD";
685 case MLX5_QP_ST_XRC: return "XRC";
686 case MLX5_QP_ST_MLX: return "MLX";
687 case MLX5_QP_ST_DCI: return "DCI";
688 case MLX5_QP_ST_QP0: return "QP0";
689 case MLX5_QP_ST_QP1: return "QP1";
690 case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
691 case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
692 case MLX5_QP_ST_SNIFFER: return "SNIFFER";
693 case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
694 case MLX5_QP_ST_PTP_1588: return "PTP_1588";
695 case MLX5_QP_ST_REG_UMR: return "REG_UMR";
696 case MLX5_QP_ST_SW_CNAK: return "DC_CNAK";
697 default: return "Invalid transport type";
701 static inline const char *mlx5_qp_state_str(int state)
704 case MLX5_QP_STATE_RST:
706 case MLX5_QP_STATE_INIT:
708 case MLX5_QP_STATE_RTR:
710 case MLX5_QP_STATE_RTS:
712 case MLX5_QP_STATE_SQER:
714 case MLX5_QP_STATE_SQD:
716 case MLX5_QP_STATE_ERR:
718 case MLX5_QP_STATE_SQ_DRAINING:
719 return "SQ_DRAINING";
720 case MLX5_QP_STATE_SUSPENDED:
722 default: return "Invalid QP state";
726 #endif /* MLX5_QP_H */