/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <dev/mlx5/mlx5_ifc.h>

#define FW_INIT_TIMEOUT_MILI 2000
#define FW_INIT_WAIT_MS 2

#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0x80
#else
#error Host endianness not defined
#endif

#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) __offsetof(struct mlx5_ifc_##typ##_bits, fld)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))

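/*
 * Illustrative sketch (not part of the original header): how the layout
 * helpers above are typically used.  "query_hca_cap_in" and its "opcode"
 * field come from mlx5_ifc.h; the local variable names are hypothetical.
 *
 *	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)];		// buffer sized in dwords
 *	size_t sz = MLX5_ST_SZ_BYTES(query_hca_cap_in);		// same layout in bytes
 *	void *op = MLX5_ADDR_OF(query_hca_cap_in, in, opcode);	// pointer to one field
 */
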
/* insert a value into a struct field */
#define MLX5_SET(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) + \
	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
	__mlx5_mask(typ, fld))

#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})

#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

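/*
 * Illustrative sketch (not from the original header): filling a command
 * mailbox with MLX5_SET() and reading a result with MLX5_GET().  The
 * structure and field names below are the mlx5_ifc.h names normally used
 * by the driver; the buffer names are hypothetical.
 *
 *	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
 *	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
 *
 *	MLX5_SET(query_vport_state_in, in, opcode,
 *		 MLX5_CMD_OP_QUERY_VPORT_STATE);
 *	// ... execute the command against the device ...
 *	state = MLX5_GET(query_vport_state_out, out, state);
 *
 * MLX5_SET() only rewrites the 32-bit word holding the field, so the
 * caller is expected to zero-initialize the buffer first.
 */
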
enum {
	MLX5_MAX_COMMANDS = 32,
	MLX5_CMD_DATA_BLOCK_SIZE = 512,
	MLX5_PCI_CMD_XPORT = 7,
	MLX5_MKEY_BSF_OCTO_SIZE = 4,
};

enum {
	MLX5_EXTENDED_UD_AV = 0x80000000,
};

enum {
	MLX5_CQ_FLAGS_OI = 2,
};

enum {
	MLX5_STAT_RATE_OFFSET = 5,
};

enum {
	MLX5_INLINE_SEG = 0x80000000,
};

enum {
	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
};

enum {
	MLX5_MIN_PKEY_TABLE_SIZE = 128,
	MLX5_MAX_LOG_PKEY_TABLE = 5,
};

enum {
	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
};

enum {
	MLX5_PERM_LOCAL_READ = 1 << 2,
	MLX5_PERM_LOCAL_WRITE = 1 << 3,
	MLX5_PERM_REMOTE_READ = 1 << 4,
	MLX5_PERM_REMOTE_WRITE = 1 << 5,
	MLX5_PERM_ATOMIC = 1 << 6,
	MLX5_PERM_UMR_EN = 1 << 7,
};

enum {
	MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
	MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
	MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
	MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
};

enum {
	MLX5_MKEY_REMOTE_INVAL = 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
	MLX5_MKEY_BSF_EN = 1 << 30,
	MLX5_MKEY_LEN64 = 1 << 31,
};

enum {
	MLX5_BF_REGS_PER_PAGE = 4,
	MLX5_MAX_UAR_PAGES = 1 << 8,
	MLX5_NON_FP_BF_REGS_PER_PAGE = 2,
	MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
};

enum {
	MLX5_MKEY_MASK_LEN = 1ull << 0,
	MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
	MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
	MLX5_MKEY_MASK_PD = 1ull << 7,
	MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
	MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
	MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
	MLX5_MKEY_MASK_KEY = 1ull << 13,
	MLX5_MKEY_MASK_QPN = 1ull << 14,
	MLX5_MKEY_MASK_LR = 1ull << 17,
	MLX5_MKEY_MASK_LW = 1ull << 18,
	MLX5_MKEY_MASK_RR = 1ull << 19,
	MLX5_MKEY_MASK_RW = 1ull << 20,
	MLX5_MKEY_MASK_A = 1ull << 21,
	MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
	MLX5_MKEY_MASK_FREE = 1ull << 29,
};

enum {
	MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),

	MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
	MLX5_UMR_CHECK_FREE = (2 << 5),

	MLX5_UMR_INLINE = (1 << 7),
};

#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT

enum {
	MLX5_EVENT_QUEUE_TYPE_QP = 0,
	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
};

enum {
	MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
	MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
	MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
	MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
};

enum {
	MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX = 1,
	MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE,
	MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE,
	MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE,
	MLX5_MAX_INLINE_RECEIVE_SIZE = 64
};

enum {
	MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
	MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
	MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD = 1LL << 21,
	MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
	MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
	MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
	MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 33,
	MLX5_DEV_CAP_FLAG_ROCE = 1LL << 34,
	MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
	MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
	MLX5_DEV_CAP_FLAG_DRAIN_SIGERR = 1LL << 48,
};

enum {
	MLX5_ROCE_VERSION_1 = 0,
	MLX5_ROCE_VERSION_1_5 = 1,
	MLX5_ROCE_VERSION_2 = 2,
};

enum {
	MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
	MLX5_ROCE_VERSION_1_5_CAP = 1 << MLX5_ROCE_VERSION_1_5,
	MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4 = 0,
	MLX5_ROCE_L3_TYPE_IPV6 = 1,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
	MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
};

enum {
	MLX5_OPCODE_NOP = 0x00,
	MLX5_OPCODE_SEND_INVAL = 0x01,
	MLX5_OPCODE_RDMA_WRITE = 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
	MLX5_OPCODE_SEND = 0x0a,
	MLX5_OPCODE_SEND_IMM = 0x0b,
	MLX5_OPCODE_LSO = 0x0e,
	MLX5_OPCODE_RDMA_READ = 0x10,
	MLX5_OPCODE_ATOMIC_CS = 0x11,
	MLX5_OPCODE_ATOMIC_FA = 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
	MLX5_OPCODE_BIND_MW = 0x18,
	MLX5_OPCODE_CONFIG_CMD = 0x1f,

	MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
	MLX5_RECV_OPCODE_SEND = 0x01,
	MLX5_RECV_OPCODE_SEND_IMM = 0x02,
	MLX5_RECV_OPCODE_SEND_INVAL = 0x03,

	MLX5_CQE_OPCODE_ERROR = 0x1e,
	MLX5_CQE_OPCODE_RESIZE = 0x16,

	MLX5_OPCODE_SET_PSV = 0x20,
	MLX5_OPCODE_GET_PSV = 0x21,
	MLX5_OPCODE_CHECK_PSV = 0x22,
	MLX5_OPCODE_RGET_PSV = 0x26,
	MLX5_OPCODE_RCHECK_PSV = 0x27,

	MLX5_OPCODE_UMR = 0x25,

	MLX5_OPCODE_SIGNATURE_CANCELED = (1 << 15),
};

enum {
	MLX5_SET_PORT_RESET_QKEY = 0,
	MLX5_SET_PORT_GUID0 = 16,
	MLX5_SET_PORT_NODE_GUID = 17,
	MLX5_SET_PORT_SYS_GUID = 18,
	MLX5_SET_PORT_GID_TABLE = 19,
	MLX5_SET_PORT_PKEY_TABLE = 20,
};

enum {
	MLX5_MAX_PAGE_SHIFT = 31
};

enum {
	MLX5_ADAPTER_PAGE_SHIFT = 12,
	MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
};

enum {
	MLX5_CAP_OFF_CMDIF_CSUM = 46,
};

struct mlx5_inbox_hdr {

struct mlx5_outbox_hdr {

struct mlx5_cmd_set_dc_cnak_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_cmd_set_dc_cnak_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_cmd_layout {

struct mlx5_health_buffer {
	__be32 assert_var[5];
	__be32 assert_exit_ptr;
	__be32 assert_callra;

struct mlx5_init_seg {
	__be32 cmdif_rev_fw_sub;
	__be32 cmdq_addr_l_sz;
	struct mlx5_health_buffer health;
	__be32 internal_timer_h;
	__be32 internal_timer_l;
	__be32 health_counter;
	__be32 ieee1588_clk_type;

struct mlx5_eqe_comp {

struct mlx5_eqe_qp_srq {

struct mlx5_eqe_cq_err {

struct mlx5_eqe_port_state {

struct mlx5_eqe_gpio {

struct mlx5_eqe_congestion {

struct mlx5_eqe_stall_vl {

struct mlx5_eqe_cmd {

struct mlx5_eqe_page_req {

struct mlx5_eqe_vport_change {

#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF

enum {
	MLX5_MODULE_STATUS_PLUGGED = 0x1,
	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
	MLX5_MODULE_STATUS_ERROR = 0x3,
};

enum {
	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0,
	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE = 0x1,
	MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5,
	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
	MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED = 0x7,
};

struct mlx5_eqe_port_module_event {

	struct mlx5_eqe_cmd cmd;
	struct mlx5_eqe_comp comp;
	struct mlx5_eqe_qp_srq qp_srq;
	struct mlx5_eqe_cq_err cq_err;
	struct mlx5_eqe_port_state port;
	struct mlx5_eqe_gpio gpio;
	struct mlx5_eqe_congestion cong;
	struct mlx5_eqe_stall_vl stall_vl;
	struct mlx5_eqe_page_req req_pages;
	struct mlx5_eqe_port_module_event port_module_event;
	struct mlx5_eqe_vport_change vport_change;

struct mlx5_cmd_prot_block {
	u8 data[MLX5_CMD_DATA_BLOCK_SIZE];

enum {
	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
};

struct mlx5_err_cqe {
	__be32 s_wqe_opcode_qpn;

struct mlx5_cqe64 {
	u8 lro_tcppsh_abort_dupack;
	__be32 lro_ack_seq_num;
	__be32 rss_hash_result;
	__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	__be32 imm_inval_pkey;

static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;
}

static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}

static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_hdr_type_etc >> 4) & 0x7;
}

static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->vlan_info) & 0xfff;
}

static inline void get_cqe_smac(struct mlx5_cqe64 *cqe, u8 *smac)
{
	memcpy(smac, &cqe->rss_hash_type, 4);
	memcpy(smac + 4, &cqe->slid, 2);
}

static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
	return cqe->l4_hdr_type_etc & 0x1;
}

static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
	return cqe->tunneled_etc & 0x1;
}

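/*
 * Illustrative sketch (assumption, not part of the original header): how a
 * receive completion path might use the CQE accessors above.  "cqe" is a
 * hypothetical pointer to a completed struct mlx5_cqe64.
 *
 *	if (cqe_has_vlan(cqe))
 *		vlan_id = get_cqe_vlan(cqe);		// 12-bit VLAN ID
 *	switch (get_cqe_l4_hdr_type(cqe)) {
 *	case CQE_L4_HDR_TYPE_UDP:
 *		// UDP payload
 *		break;
 *	case CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA:
 *		// TCP segment carrying data, candidate for LRO
 *		break;
 *	}
 */
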
enum {
	CQE_L4_HDR_TYPE_NONE = 0x0,
	CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
	CQE_L4_HDR_TYPE_UDP = 0x2,
	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
};

enum {
	/* source L3 hash types */
	CQE_RSS_SRC_HTYPE_IP = 0x3 << 0,
	CQE_RSS_SRC_HTYPE_IPV4 = 0x1 << 0,
	CQE_RSS_SRC_HTYPE_IPV6 = 0x2 << 0,

	/* destination L3 hash types */
	CQE_RSS_DST_HTYPE_IP = 0x3 << 2,
	CQE_RSS_DST_HTYPE_IPV4 = 0x1 << 2,
	CQE_RSS_DST_HTYPE_IPV6 = 0x2 << 2,

	/* source L4 hash types */
	CQE_RSS_SRC_HTYPE_L4 = 0x3 << 4,
	CQE_RSS_SRC_HTYPE_TCP = 0x1 << 4,
	CQE_RSS_SRC_HTYPE_UDP = 0x2 << 4,
	CQE_RSS_SRC_HTYPE_IPSEC = 0x3 << 4,

	/* destination L4 hash types */
	CQE_RSS_DST_HTYPE_L4 = 0x3 << 6,
	CQE_RSS_DST_HTYPE_TCP = 0x1 << 6,
	CQE_RSS_DST_HTYPE_UDP = 0x2 << 6,
	CQE_RSS_DST_HTYPE_IPSEC = 0x3 << 6,
};

enum {
	CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
	CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
	CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
};

struct mlx5_sig_err_cqe {
	__be32 expected_trans_sig;
	__be32 actual_trans_sig;
	__be32 expected_reftag;
	__be32 actual_reftag;

struct mlx5_wqe_srq_next_seg {
	__be16 next_wqe_index;

	union mlx5_ext_cqe inl_grh;
	struct mlx5_cqe64 cqe64;

struct mlx5_srq_ctx {

struct mlx5_create_srq_mbox_in {
	struct mlx5_inbox_hdr hdr;
	struct mlx5_srq_ctx ctx;

struct mlx5_create_srq_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_destroy_srq_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_destroy_srq_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_query_srq_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_query_srq_mbox_out {
	struct mlx5_outbox_hdr hdr;
	struct mlx5_srq_ctx ctx;

struct mlx5_arm_srq_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_arm_srq_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_cq_context {
	__be32 log_sz_usr_page;
	__be32 last_notified_index;
	__be32 solicit_producer_index;
	__be32 consumer_counter;
	__be32 producer_counter;
	__be64 db_record_addr;

struct mlx5_create_cq_mbox_in {
	struct mlx5_inbox_hdr hdr;
	struct mlx5_cq_context ctx;

struct mlx5_create_cq_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_destroy_cq_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_destroy_cq_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_query_cq_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_query_cq_mbox_out {
	struct mlx5_outbox_hdr hdr;
	struct mlx5_cq_context ctx;

struct mlx5_modify_cq_mbox_in {
	struct mlx5_inbox_hdr hdr;
	struct mlx5_cq_context ctx;

struct mlx5_modify_cq_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_eq_context {
	__be32 log_sz_usr_page;
	__be32 consumer_counter;
	__be32 produser_counter;

struct mlx5_create_eq_mbox_in {
	struct mlx5_inbox_hdr hdr;
	struct mlx5_eq_context ctx;

struct mlx5_create_eq_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_map_eq_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_map_eq_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_query_eq_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_query_eq_mbox_out {
	struct mlx5_outbox_hdr hdr;
	struct mlx5_eq_context ctx;

enum {
	MLX5_MKEY_STATUS_FREE = 1 << 6,
};

struct mlx5_mkey_seg {
	/* This is a two bit field occupying bits 31-30.
	 * bit 31 is always 0,
	 * bit 30 is zero for regular MRs and 1 (e.g. free) for UMRs that do
	 * not have translation
	 */
	__be32 bsfs_octo_size;

struct mlx5_query_special_ctxs_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_query_special_ctxs_mbox_out {
	struct mlx5_outbox_hdr hdr;
	__be32 dump_fill_mkey;
	__be32 reserved_lkey;

struct mlx5_create_mkey_mbox_in {
	struct mlx5_inbox_hdr hdr;
	__be32 input_mkey_index;
	struct mlx5_mkey_seg seg;
	__be32 xlat_oct_act_size;

struct mlx5_create_mkey_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_query_mkey_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_query_mkey_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_modify_mkey_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_modify_mkey_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_dump_mkey_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_dump_mkey_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_mad_ifc_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_mad_ifc_mbox_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_access_reg_mbox_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_access_reg_mbox_out {
	struct mlx5_outbox_hdr hdr;

#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
};

struct mlx5_allocate_psv_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_allocate_psv_out {
	struct mlx5_outbox_hdr hdr;

struct mlx5_destroy_psv_in {
	struct mlx5_inbox_hdr hdr;

struct mlx5_destroy_psv_out {
	struct mlx5_outbox_hdr hdr;

static inline int mlx5_host_is_le(void)
{
#if defined(__LITTLE_ENDIAN)
	return 1;
#elif defined(__BIG_ENDIAN)
	return 0;
#else
#error Host endianness not defined
#endif
}

#define MLX5_CMD_OP_MAX 0x939

enum {
	VPORT_STATE_DOWN = 0x0,
	VPORT_STATE_UP = 0x1,
};

enum {
	MLX5_L3_PROT_TYPE_IPV4 = 0,
	MLX5_L3_PROT_TYPE_IPV6 = 1,
};

enum {
	MLX5_L4_PROT_TYPE_TCP = 0,
	MLX5_L4_PROT_TYPE_UDP = 1,
};

enum {
	MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
	MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
	MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
	MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
	MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
};

enum {
	MLX5_MATCH_OUTER_HEADERS = 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
	MLX5_MATCH_INNER_HEADERS = 1 << 2,
};

enum {
	MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
	MLX5_FLOW_TABLE_TYPE_EGRESS_ACL = 2,
	MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3,
	MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
	MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 5,
	MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 6,
};

enum {
	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE = 0,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_IF_NO_VLAN = 1,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_OVERWRITE = 2
};

enum {
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_STRIP = 1 << 0,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP = 1 << 1,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_INSERT = 1 << 2,
	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT = 1 << 3
};

enum {
	MLX5_UC_ADDR_CHANGE = (1 << 0),
	MLX5_MC_ADDR_CHANGE = (1 << 1),
	MLX5_VLAN_CHANGE = (1 << 2),
	MLX5_PROMISC_CHANGE = (1 << 3),
	MLX5_MTU_CHANGE = (1 << 4),
};

enum mlx5_list_type {
	MLX5_NIC_VPORT_LIST_TYPE_UC = 0x0,
	MLX5_NIC_VPORT_LIST_TYPE_MC = 0x1,
	MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2,
};

enum {
	MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
	MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
	MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
};

enum mlx5_cap_mode {
	HCA_CAP_OPMOD_GET_MAX = 0,
	HCA_CAP_OPMOD_GET_CUR = 1,
};

enum mlx5_cap_type {
	MLX5_CAP_GENERAL = 0,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_EOIB_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_SNAPSHOT,
	MLX5_CAP_VECTOR_CALC,
	MLX5_CAP_QOS,
	MLX5_CAP_DEBUG,
	/* NUM OF CAP Types */
	MLX5_CAP_NUM
};

/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_ETH(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ETH_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ESW_MAX(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)

#define MLX5_CAP_ODP_MAX(mdev, cap)\
	MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap)

#define MLX5_CAP_SNAPSHOT(mdev, cap) \
	MLX5_GET(snapshot_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_SNAPSHOT], cap)

#define MLX5_CAP_SNAPSHOT_MAX(mdev, cap) \
	MLX5_GET(snapshot_cap, \
		 mdev->hca_caps_max[MLX5_CAP_SNAPSHOT], cap)

#define MLX5_CAP_EOIB_OFFLOADS(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], cap)

#define MLX5_CAP_EOIB_OFFLOADS_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], cap)

#define MLX5_CAP_DEBUG(mdev, cap) \
	MLX5_GET(debug_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_DEBUG], cap)

#define MLX5_CAP_DEBUG_MAX(mdev, cap) \
	MLX5_GET(debug_cap, \
		 mdev->hca_caps_max[MLX5_CAP_DEBUG], cap)

#define MLX5_CAP_QOS(mdev, cap) \
	MLX5_GET(qos_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_QOS], cap)

#define MLX5_CAP_QOS_MAX(mdev, cap) \
	MLX5_GET(qos_cap, \
		 mdev->hca_caps_max[MLX5_CAP_QOS], cap)

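/*
 * Illustrative sketch (not from the original header): querying device
 * capabilities through the macros above, assuming the capability pages in
 * mdev->hca_caps_cur/hca_caps_max have already been loaded from firmware.
 * The field names ("port_type", "log_max_cq", "lro_cap") are taken from
 * mlx5_ifc.h; the local variables are hypothetical.
 *
 *	if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
 *		max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
 *	if (MLX5_CAP_ETH(mdev, lro_cap))
 *		// LRO may be enabled on this device
 */
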
enum {
	MLX5_CMD_STAT_OK = 0x0,
	MLX5_CMD_STAT_INT_ERR = 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
	MLX5_CMD_STAT_RES_BUSY = 0x6,
	MLX5_CMD_STAT_LIM_ERR = 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
	MLX5_CMD_STAT_IX_ERR = 0xa,
	MLX5_CMD_STAT_NO_RES_ERR = 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
};

enum {
	MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
	MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
};

enum {
	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
	MLX5_PCIE_LANE_COUNTERS_GROUP = 0x1,
	MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
};

enum {
	MLX5_NUM_UUARS_PER_PAGE = MLX5_NON_FP_BF_REGS_PER_PAGE,
	MLX5_DEF_TOT_UUARS = 8 * MLX5_NUM_UUARS_PER_PAGE,
};

enum {
	NUM_DRIVER_UARS = 4,
	NUM_LOW_LAT_UUARS = 4,
};

enum {
	MLX5_CAP_PORT_TYPE_IB = 0x0,
	MLX5_CAP_PORT_TYPE_ETH = 0x1,
};

enum {
	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_L2 = 0x0,
	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_VPORT_CONFIG = 0x1,
	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_NOT_REQUIRED = 0x2
};

enum {
	MLX5_QUERY_VPORT_STATE_OUT_STATE_FOLLOW = 0x2,
};

static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}

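/*
 * Worked example of the mapping above (informational): the firmware reports
 * the PKey table size as a log2 delta from 128 entries, so
 *	pkey_sz = 0  ->  128 entries
 *	pkey_sz = 5  ->  4096 entries (MLX5_MAX_LOG_PKEY_TABLE)
 * and anything larger than MLX5_MAX_LOG_PKEY_TABLE is treated as invalid.
 */
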
struct mlx5_ifc_mcia_reg_bits {

	u8 i2c_device_address[0x8];
	u8 page_number[0x8];
	u8 device_address[0x10];

	u8 reserved_2[0x10];

	u8 reserved_3[0x20];

#define MLX5_CMD_OP_QUERY_EEPROM 0x93c

struct mlx5_mini_cqe8 {

enum {
	MLX5_NO_INLINE_DATA,
	MLX5_INLINE_DATA32_SEG,
	MLX5_INLINE_DATA64_SEG,
};

enum mlx5_exp_cqe_zip_recv_type {
	MLX5_CQE_FORMAT_HASH,
	MLX5_CQE_FORMAT_CSUM,
};

#define MLX5E_CQE_FORMAT_MASK 0xc
static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe)
{
	return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2;
}

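/*
 * Illustrative sketch (assumption, not part of the original header):
 * mlx5_get_cqe_format() extracts bits [3:2] of op_own.  A receive path that
 * supports CQE compression might use it as follows; "cqe" is a hypothetical
 * pointer into the CQ buffer.
 *
 *	if (mlx5_get_cqe_format(cqe) == MLX5_CQE_FORMAT_CSUM)
 *		// compressed (mini-CQE) session, checksum format
 *	else
 *		// regular 64-byte CQE
 */
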
/* 8 regular priorities + 1 for multicast */
#define MLX5_NUM_BYPASS_FTS 9

#endif /* MLX5_DEVICE_H */