2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <linux/types.h>
32 #include <rdma/ib_verbs.h>
33 #include <dev/mlx5/mlx5_ifc.h>
35 #define FW_INIT_TIMEOUT_MILI 2000
36 #define FW_INIT_WAIT_MS 2
38 #if defined(__LITTLE_ENDIAN)
39 #define MLX5_SET_HOST_ENDIANNESS 0
40 #elif defined(__BIG_ENDIAN)
41 #define MLX5_SET_HOST_ENDIANNESS 0x80
43 #error Host endianness not defined
47 #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
48 #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
49 #define __mlx5_bit_off(typ, fld) __offsetof(struct mlx5_ifc_##typ##_bits, fld)
50 #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
51 #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
52 #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
53 #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
54 #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
55 #define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
57 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
58 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
59 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
60 #define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
61 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
62 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
63 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
64 #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
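/*
 * Illustrative sketch (not part of the original header): the sizing and
 * addressing macros above turn the bit-level layouts from mlx5_ifc.h into
 * byte/dword quantities, so a caller can size a command mailbox and locate
 * an embedded context without hand-computed offsets.  The struct and field
 * names below come from mlx5_ifc.h and are assumptions for this example:
 *
 *	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
 *	void *ctx;
 *
 *	ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out, nic_vport_context);
 */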
66 /* insert a value into a struct */
67 #define MLX5_SET(typ, p, fld, v) do { \
68 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
69 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
70 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
71 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
72 (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
73 << __mlx5_dw_bit_off(typ, fld))); \
76 #define MLX5_SET_TO_ONES(typ, p, fld) do { \
77 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
78 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
79 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
80 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
81 (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
82 << __mlx5_dw_bit_off(typ, fld))); \
85 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
86 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
87 __mlx5_mask(typ, fld))
89 #define MLX5_GET_PR(typ, p, fld) ({ \
90 u32 ___t = MLX5_GET(typ, p, fld); \
91 pr_debug(#fld " = 0x%x\n", ___t); \
95 #define MLX5_SET64(typ, p, fld, v) do { \
96 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
97 BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
98 *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
101 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
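/*
 * Illustrative sketch (not part of the original header): MLX5_SET() and
 * MLX5_GET() read-modify-write one big-endian dword at a time, so command
 * layouts can be filled and parsed field by field.  The struct, field and
 * opcode names below are taken from mlx5_ifc.h and should be treated as
 * assumptions:
 *
 *	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
 *	u32 out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
 *	u32 status;
 *
 *	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
 *	... execute the command with in/out ...
 *	status = MLX5_GET(query_hca_cap_out, out, status);
 */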
104 MLX5_MAX_COMMANDS = 32,
105 MLX5_CMD_DATA_BLOCK_SIZE = 512,
106 MLX5_CMD_MBOX_SIZE = 1024,
107 MLX5_PCI_CMD_XPORT = 7,
108 MLX5_MKEY_BSF_OCTO_SIZE = 4,
113 MLX5_EXTENDED_UD_AV = 0x80000000,
117 MLX5_CQ_FLAGS_OI = 2,
121 MLX5_STAT_RATE_OFFSET = 5,
125 MLX5_INLINE_SEG = 0x80000000,
129 MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
133 MLX5_MIN_PKEY_TABLE_SIZE = 128,
134 MLX5_MAX_LOG_PKEY_TABLE = 5,
138 MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
142 MLX5_PERM_LOCAL_READ = 1 << 2,
143 MLX5_PERM_LOCAL_WRITE = 1 << 3,
144 MLX5_PERM_REMOTE_READ = 1 << 4,
145 MLX5_PERM_REMOTE_WRITE = 1 << 5,
146 MLX5_PERM_ATOMIC = 1 << 6,
147 MLX5_PERM_UMR_EN = 1 << 7,
151 MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
152 MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
153 MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
154 MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
155 MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
159 MLX5_MKEY_REMOTE_INVAL = 1 << 24,
160 MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
161 MLX5_MKEY_BSF_EN = 1 << 30,
162 MLX5_MKEY_LEN64 = 1 << 31,
171 MLX5_BF_REGS_PER_PAGE = 4,
172 MLX5_MAX_UAR_PAGES = 1 << 8,
173 MLX5_NON_FP_BF_REGS_PER_PAGE = 2,
174 MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
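/*
 * With the constants above this works out to 1 << 8 = 256 UAR pages and
 * 256 * 2 = 512 micro UARs; each UAR page carries MLX5_BF_REGS_PER_PAGE = 4
 * blue-flame registers, of which MLX5_NON_FP_BF_REGS_PER_PAGE = 2 are
 * counted as regular (non fast-path) doorbell registers.
 */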
178 MLX5_MKEY_MASK_LEN = 1ull << 0,
179 MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
180 MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
181 MLX5_MKEY_MASK_PD = 1ull << 7,
182 MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
183 MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
184 MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
185 MLX5_MKEY_MASK_KEY = 1ull << 13,
186 MLX5_MKEY_MASK_QPN = 1ull << 14,
187 MLX5_MKEY_MASK_LR = 1ull << 17,
188 MLX5_MKEY_MASK_LW = 1ull << 18,
189 MLX5_MKEY_MASK_RR = 1ull << 19,
190 MLX5_MKEY_MASK_RW = 1ull << 20,
191 MLX5_MKEY_MASK_A = 1ull << 21,
192 MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
193 MLX5_MKEY_MASK_FREE = 1ull << 29,
197 MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
199 MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
200 MLX5_UMR_CHECK_FREE = (2 << 5),
202 MLX5_UMR_INLINE = (1 << 7),
205 #define MLX5_UMR_MTT_ALIGNMENT 0x40
206 #define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
207 #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
210 MLX5_EVENT_QUEUE_TYPE_QP = 0,
211 MLX5_EVENT_QUEUE_TYPE_RQ = 1,
212 MLX5_EVENT_QUEUE_TYPE_SQ = 2,
216 MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
217 MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
218 MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
219 MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
220 MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
221 MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
222 MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
226 MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX = 1,
227 MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE,
228 MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE,
229 MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE,
230 MLX5_MAX_INLINE_RECEIVE_SIZE = 64
234 MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
235 MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
236 MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
237 MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
238 MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD = 1LL << 21,
239 MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
240 MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
241 MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
242 MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 33,
243 MLX5_DEV_CAP_FLAG_ROCE = 1LL << 34,
244 MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
245 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
246 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
247 MLX5_DEV_CAP_FLAG_DRAIN_SIGERR = 1LL << 48,
251 MLX5_ROCE_VERSION_1 = 0,
252 MLX5_ROCE_VERSION_1_5 = 1,
253 MLX5_ROCE_VERSION_2 = 2,
257 MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
258 MLX5_ROCE_VERSION_1_5_CAP = 1 << MLX5_ROCE_VERSION_1_5,
259 MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,
263 MLX5_ROCE_L3_TYPE_IPV4 = 0,
264 MLX5_ROCE_L3_TYPE_IPV6 = 1,
268 MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
269 MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
273 MLX5_OPCODE_NOP = 0x00,
274 MLX5_OPCODE_SEND_INVAL = 0x01,
275 MLX5_OPCODE_RDMA_WRITE = 0x08,
276 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
277 MLX5_OPCODE_SEND = 0x0a,
278 MLX5_OPCODE_SEND_IMM = 0x0b,
279 MLX5_OPCODE_LSO = 0x0e,
280 MLX5_OPCODE_RDMA_READ = 0x10,
281 MLX5_OPCODE_ATOMIC_CS = 0x11,
282 MLX5_OPCODE_ATOMIC_FA = 0x12,
283 MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
284 MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
285 MLX5_OPCODE_BIND_MW = 0x18,
286 MLX5_OPCODE_CONFIG_CMD = 0x1f,
288 MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
289 MLX5_RECV_OPCODE_SEND = 0x01,
290 MLX5_RECV_OPCODE_SEND_IMM = 0x02,
291 MLX5_RECV_OPCODE_SEND_INVAL = 0x03,
293 MLX5_CQE_OPCODE_ERROR = 0x1e,
294 MLX5_CQE_OPCODE_RESIZE = 0x16,
296 MLX5_OPCODE_SET_PSV = 0x20,
297 MLX5_OPCODE_GET_PSV = 0x21,
298 MLX5_OPCODE_CHECK_PSV = 0x22,
299 MLX5_OPCODE_RGET_PSV = 0x26,
300 MLX5_OPCODE_RCHECK_PSV = 0x27,
302 MLX5_OPCODE_UMR = 0x25,
304 MLX5_OPCODE_SIGNATURE_CANCELED = (1 << 15),
308 MLX5_SET_PORT_RESET_QKEY = 0,
309 MLX5_SET_PORT_GUID0 = 16,
310 MLX5_SET_PORT_NODE_GUID = 17,
311 MLX5_SET_PORT_SYS_GUID = 18,
312 MLX5_SET_PORT_GID_TABLE = 19,
313 MLX5_SET_PORT_PKEY_TABLE = 20,
317 MLX5_MAX_PAGE_SHIFT = 31
321 MLX5_ADAPTER_PAGE_SHIFT = 12,
322 MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
326 MLX5_CAP_OFF_CMDIF_CSUM = 46,
329 struct mlx5_inbox_hdr {
335 struct mlx5_outbox_hdr {
341 struct mlx5_cmd_set_dc_cnak_mbox_in {
342 struct mlx5_inbox_hdr hdr;
348 struct mlx5_cmd_set_dc_cnak_mbox_out {
349 struct mlx5_outbox_hdr hdr;
353 struct mlx5_cmd_layout {
369 struct mlx5_health_buffer {
370 __be32 assert_var[5];
372 __be32 assert_exit_ptr;
373 __be32 assert_callra;
383 struct mlx5_init_seg {
385 __be32 cmdif_rev_fw_sub;
388 __be32 cmdq_addr_l_sz;
392 struct mlx5_health_buffer health;
394 __be32 internal_timer_h;
395 __be32 internal_timer_l;
397 __be32 health_counter;
400 __be32 ieee1588_clk_type;
404 struct mlx5_eqe_comp {
409 struct mlx5_eqe_qp_srq {
414 struct mlx5_eqe_cq_err {
420 struct mlx5_eqe_port_state {
425 struct mlx5_eqe_gpio {
430 struct mlx5_eqe_congestion {
436 struct mlx5_eqe_stall_vl {
441 struct mlx5_eqe_cmd {
446 struct mlx5_eqe_page_req {
453 struct mlx5_eqe_vport_change {
460 #define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
461 #define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
464 MLX5_MODULE_STATUS_PLUGGED = 0x1,
465 MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
466 MLX5_MODULE_STATUS_ERROR = 0x3,
470 MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0,
471 MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE = 0x1,
472 MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
473 MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
474 MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
475 MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5,
476 MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
477 MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED = 0x7,
480 struct mlx5_eqe_port_module_event {
491 struct mlx5_eqe_cmd cmd;
492 struct mlx5_eqe_comp comp;
493 struct mlx5_eqe_qp_srq qp_srq;
494 struct mlx5_eqe_cq_err cq_err;
495 struct mlx5_eqe_port_state port;
496 struct mlx5_eqe_gpio gpio;
497 struct mlx5_eqe_congestion cong;
498 struct mlx5_eqe_stall_vl stall_vl;
499 struct mlx5_eqe_page_req req_pages;
500 struct mlx5_eqe_port_module_event port_module_event;
501 struct mlx5_eqe_vport_change vport_change;
516 struct mlx5_cmd_prot_block {
517 u8 data[MLX5_CMD_DATA_BLOCK_SIZE];
527 #define MLX5_NUM_CMDS_IN_ADAPTER_PAGE \
528 (MLX5_ADAPTER_PAGE_SIZE / MLX5_CMD_MBOX_SIZE)
529 CTASSERT(MLX5_CMD_MBOX_SIZE >= sizeof(struct mlx5_cmd_prot_block));
530 CTASSERT(MLX5_CMD_MBOX_SIZE <= MLX5_ADAPTER_PAGE_SIZE);
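/*
 * With MLX5_ADAPTER_PAGE_SIZE = 4096 and MLX5_CMD_MBOX_SIZE = 1024 this
 * gives MLX5_NUM_CMDS_IN_ADAPTER_PAGE = 4, i.e. four command mailboxes per
 * adapter page; the CTASSERTs above ensure a protection block fits in a
 * mailbox and a mailbox fits in an adapter page.
 */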
533 MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
536 struct mlx5_err_cqe {
542 __be32 s_wqe_opcode_qpn;
551 u8 lro_tcppsh_abort_dupack;
554 __be32 lro_ack_seq_num;
555 __be32 rss_hash_result;
565 __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
566 __be32 imm_inval_pkey;
576 static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe)
578 return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;
581 static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
583 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
586 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
588 return (cqe->l4_hdr_type_etc >> 4) & 0x7;
591 static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe)
593 return be16_to_cpu(cqe->vlan_info) & 0xfff;
596 static inline void get_cqe_smac(struct mlx5_cqe64 *cqe, u8 *smac)
598 memcpy(smac, &cqe->rss_hash_type, 4);
599 memcpy(smac + 4, &cqe->slid, 2);
602 static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
604 return cqe->l4_hdr_type_etc & 0x1;
607 static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
609 return cqe->tunneled_etc & 0x1;
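/*
 * Illustrative sketch (not part of the original header): the helpers above
 * decode fields of a 64-byte completion entry.  A receive handler might use
 * them roughly as follows; vlan_tci and the surrounding flow are
 * hypothetical and only meant to show the intended use:
 *
 *	if (cqe_has_vlan(cqe))
 *		vlan_tci = get_cqe_vlan(cqe);
 *	if (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA)
 *		... pure TCP ACK, a candidate for LRO aggregation ...
 */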
613 CQE_L4_HDR_TYPE_NONE = 0x0,
614 CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
615 CQE_L4_HDR_TYPE_UDP = 0x2,
616 CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
617 CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
621 /* source L3 hash types */
622 CQE_RSS_SRC_HTYPE_IP = 0x3 << 0,
623 CQE_RSS_SRC_HTYPE_IPV4 = 0x1 << 0,
624 CQE_RSS_SRC_HTYPE_IPV6 = 0x2 << 0,
626 /* destination L3 hash types */
627 CQE_RSS_DST_HTYPE_IP = 0x3 << 2,
628 CQE_RSS_DST_HTYPE_IPV4 = 0x1 << 2,
629 CQE_RSS_DST_HTYPE_IPV6 = 0x2 << 2,
631 /* source L4 hash types */
632 CQE_RSS_SRC_HTYPE_L4 = 0x3 << 4,
633 CQE_RSS_SRC_HTYPE_TCP = 0x1 << 4,
634 CQE_RSS_SRC_HTYPE_UDP = 0x2 << 4,
635 CQE_RSS_SRC_HTYPE_IPSEC = 0x3 << 4,
637 /* destination L4 hash types */
638 CQE_RSS_DST_HTYPE_L4 = 0x3 << 6,
639 CQE_RSS_DST_HTYPE_TCP = 0x1 << 6,
640 CQE_RSS_DST_HTYPE_UDP = 0x2 << 6,
641 CQE_RSS_DST_HTYPE_IPSEC = 0x3 << 6,
645 CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
646 CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
647 CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
656 struct mlx5_sig_err_cqe {
658 __be32 expected_trans_sig;
659 __be32 actual_trans_sig;
660 __be32 expected_reftag;
661 __be32 actual_reftag;
673 struct mlx5_wqe_srq_next_seg {
675 __be16 next_wqe_index;
686 union mlx5_ext_cqe inl_grh;
687 struct mlx5_cqe64 cqe64;
690 struct mlx5_srq_ctx {
705 struct mlx5_create_srq_mbox_in {
706 struct mlx5_inbox_hdr hdr;
709 struct mlx5_srq_ctx ctx;
714 struct mlx5_create_srq_mbox_out {
715 struct mlx5_outbox_hdr hdr;
720 struct mlx5_destroy_srq_mbox_in {
721 struct mlx5_inbox_hdr hdr;
726 struct mlx5_destroy_srq_mbox_out {
727 struct mlx5_outbox_hdr hdr;
731 struct mlx5_query_srq_mbox_in {
732 struct mlx5_inbox_hdr hdr;
737 struct mlx5_query_srq_mbox_out {
738 struct mlx5_outbox_hdr hdr;
740 struct mlx5_srq_ctx ctx;
745 struct mlx5_arm_srq_mbox_in {
746 struct mlx5_inbox_hdr hdr;
752 struct mlx5_arm_srq_mbox_out {
753 struct mlx5_outbox_hdr hdr;
757 struct mlx5_cq_context {
764 __be32 log_sz_usr_page;
771 __be32 last_notified_index;
772 __be32 solicit_producer_index;
773 __be32 consumer_counter;
774 __be32 producer_counter;
776 __be64 db_record_addr;
779 struct mlx5_create_cq_mbox_in {
780 struct mlx5_inbox_hdr hdr;
783 struct mlx5_cq_context ctx;
788 struct mlx5_create_cq_mbox_out {
789 struct mlx5_outbox_hdr hdr;
794 struct mlx5_destroy_cq_mbox_in {
795 struct mlx5_inbox_hdr hdr;
800 struct mlx5_destroy_cq_mbox_out {
801 struct mlx5_outbox_hdr hdr;
805 struct mlx5_query_cq_mbox_in {
806 struct mlx5_inbox_hdr hdr;
811 struct mlx5_query_cq_mbox_out {
812 struct mlx5_outbox_hdr hdr;
814 struct mlx5_cq_context ctx;
819 struct mlx5_modify_cq_mbox_in {
820 struct mlx5_inbox_hdr hdr;
823 struct mlx5_cq_context ctx;
828 struct mlx5_modify_cq_mbox_out {
829 struct mlx5_outbox_hdr hdr;
833 struct mlx5_eq_context {
839 __be32 log_sz_usr_page;
844 __be32 consumer_counter;
845 __be32 produser_counter;
849 struct mlx5_create_eq_mbox_in {
850 struct mlx5_inbox_hdr hdr;
854 struct mlx5_eq_context ctx;
861 struct mlx5_create_eq_mbox_out {
862 struct mlx5_outbox_hdr hdr;
868 struct mlx5_map_eq_mbox_in {
869 struct mlx5_inbox_hdr hdr;
877 struct mlx5_map_eq_mbox_out {
878 struct mlx5_outbox_hdr hdr;
882 struct mlx5_query_eq_mbox_in {
883 struct mlx5_inbox_hdr hdr;
889 struct mlx5_query_eq_mbox_out {
890 struct mlx5_outbox_hdr hdr;
892 struct mlx5_eq_context ctx;
896 MLX5_MKEY_STATUS_FREE = 1 << 6,
899 struct mlx5_mkey_seg {
900 /* This is a two-bit field occupying bits 31-30.
901 * bit 31 is always 0,
902 * bit 30 is zero for regular MRs and 1 (e.g. free) for UMRs that do not have translation
913 __be32 bsfs_octo_size;
921 struct mlx5_query_special_ctxs_mbox_in {
922 struct mlx5_inbox_hdr hdr;
926 struct mlx5_query_special_ctxs_mbox_out {
927 struct mlx5_outbox_hdr hdr;
928 __be32 dump_fill_mkey;
929 __be32 reserved_lkey;
932 struct mlx5_create_mkey_mbox_in {
933 struct mlx5_inbox_hdr hdr;
934 __be32 input_mkey_index;
936 struct mlx5_mkey_seg seg;
938 __be32 xlat_oct_act_size;
944 struct mlx5_create_mkey_mbox_out {
945 struct mlx5_outbox_hdr hdr;
950 struct mlx5_query_mkey_mbox_in {
951 struct mlx5_inbox_hdr hdr;
955 struct mlx5_query_mkey_mbox_out {
956 struct mlx5_outbox_hdr hdr;
960 struct mlx5_modify_mkey_mbox_in {
961 struct mlx5_inbox_hdr hdr;
966 struct mlx5_modify_mkey_mbox_out {
967 struct mlx5_outbox_hdr hdr;
971 struct mlx5_dump_mkey_mbox_in {
972 struct mlx5_inbox_hdr hdr;
975 struct mlx5_dump_mkey_mbox_out {
976 struct mlx5_outbox_hdr hdr;
980 struct mlx5_mad_ifc_mbox_in {
981 struct mlx5_inbox_hdr hdr;
989 struct mlx5_mad_ifc_mbox_out {
990 struct mlx5_outbox_hdr hdr;
995 struct mlx5_access_reg_mbox_in {
996 struct mlx5_inbox_hdr hdr;
1003 struct mlx5_access_reg_mbox_out {
1004 struct mlx5_outbox_hdr hdr;
1009 #define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
1012 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
1015 struct mlx5_allocate_psv_in {
1016 struct mlx5_inbox_hdr hdr;
1021 struct mlx5_allocate_psv_out {
1022 struct mlx5_outbox_hdr hdr;
1027 struct mlx5_destroy_psv_in {
1028 struct mlx5_inbox_hdr hdr;
1033 struct mlx5_destroy_psv_out {
1034 struct mlx5_outbox_hdr hdr;
1038 static inline int mlx5_host_is_le(void)
1040 #if defined(__LITTLE_ENDIAN)
1042 #elif defined(__BIG_ENDIAN)
1045 #error Host endianness not defined
1049 #define MLX5_CMD_OP_MAX 0x939
1052 VPORT_STATE_DOWN = 0x0,
1053 VPORT_STATE_UP = 0x1,
1057 MLX5_L3_PROT_TYPE_IPV4 = 0,
1058 MLX5_L3_PROT_TYPE_IPV6 = 1,
1062 MLX5_L4_PROT_TYPE_TCP = 0,
1063 MLX5_L4_PROT_TYPE_UDP = 1,
1067 MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
1068 MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
1069 MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
1070 MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
1071 MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
1075 MLX5_MATCH_OUTER_HEADERS = 1 << 0,
1076 MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
1077 MLX5_MATCH_INNER_HEADERS = 1 << 2,
1082 MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
1083 MLX5_FLOW_TABLE_TYPE_EGRESS_ACL = 2,
1084 MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3,
1085 MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
1086 MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 5,
1087 MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 6,
1091 MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE = 0,
1092 MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_IF_NO_VLAN = 1,
1093 MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_OVERWRITE = 2
1097 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_STRIP = 1 << 0,
1098 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP = 1 << 1,
1099 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_INSERT = 1 << 2,
1100 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT = 1 << 3
1104 MLX5_UC_ADDR_CHANGE = (1 << 0),
1105 MLX5_MC_ADDR_CHANGE = (1 << 1),
1106 MLX5_VLAN_CHANGE = (1 << 2),
1107 MLX5_PROMISC_CHANGE = (1 << 3),
1108 MLX5_MTU_CHANGE = (1 << 4),
1111 enum mlx5_list_type {
1112 MLX5_NIC_VPORT_LIST_TYPE_UC = 0x0,
1113 MLX5_NIC_VPORT_LIST_TYPE_MC = 0x1,
1114 MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2,
1118 MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
1119 MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
1120 MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
1126 enum mlx5_cap_mode {
1127 HCA_CAP_OPMOD_GET_MAX = 0,
1128 HCA_CAP_OPMOD_GET_CUR = 1,
1131 enum mlx5_cap_type {
1132 MLX5_CAP_GENERAL = 0,
1133 MLX5_CAP_ETHERNET_OFFLOADS,
1137 MLX5_CAP_IPOIB_OFFLOADS,
1138 MLX5_CAP_EOIB_OFFLOADS,
1139 MLX5_CAP_FLOW_TABLE,
1140 MLX5_CAP_ESWITCH_FLOW_TABLE,
1143 MLX5_CAP_VECTOR_CALC,
1146 /* number of capability types */
1150 /* device capability accessor macros */
1151 #define MLX5_CAP_GEN(mdev, cap) \
1152 MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
1154 #define MLX5_CAP_GEN_MAX(mdev, cap) \
1155 MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
1157 #define MLX5_CAP_ETH(mdev, cap) \
1158 MLX5_GET(per_protocol_networking_offload_caps,\
1159 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1161 #define MLX5_CAP_ETH_MAX(mdev, cap) \
1162 MLX5_GET(per_protocol_networking_offload_caps,\
1163 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1165 #define MLX5_CAP_ROCE(mdev, cap) \
1166 MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
1168 #define MLX5_CAP_ROCE_MAX(mdev, cap) \
1169 MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
1171 #define MLX5_CAP_ATOMIC(mdev, cap) \
1172 MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
1174 #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
1175 MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
1177 #define MLX5_CAP_FLOWTABLE(mdev, cap) \
1178 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
1180 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1181 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
1183 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1184 MLX5_GET(flow_table_eswitch_cap, \
1185 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1187 #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
1188 MLX5_GET(flow_table_eswitch_cap, \
1189 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1191 #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
1192 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
1194 #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
1195 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
1197 #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
1198 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
1200 #define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
1201 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
1203 #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
1204 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
1206 #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
1207 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
1209 #define MLX5_CAP_ESW(mdev, cap) \
1210 MLX5_GET(e_switch_cap, \
1211 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
1213 #define MLX5_CAP_ESW_MAX(mdev, cap) \
1214 MLX5_GET(e_switch_cap, \
1215 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
1217 #define MLX5_CAP_ODP(mdev, cap)\
1218 MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
1220 #define MLX5_CAP_ODP_MAX(mdev, cap)\
1221 MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap)
1223 #define MLX5_CAP_SNAPSHOT(mdev, cap) \
1224 MLX5_GET(snapshot_cap, \
1225 mdev->hca_caps_cur[MLX5_CAP_SNAPSHOT], cap)
1227 #define MLX5_CAP_SNAPSHOT_MAX(mdev, cap) \
1228 MLX5_GET(snapshot_cap, \
1229 mdev->hca_caps_max[MLX5_CAP_SNAPSHOT], cap)
1231 #define MLX5_CAP_EOIB_OFFLOADS(mdev, cap) \
1232 MLX5_GET(per_protocol_networking_offload_caps,\
1233 mdev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], cap)
1235 #define MLX5_CAP_EOIB_OFFLOADS_MAX(mdev, cap) \
1236 MLX5_GET(per_protocol_networking_offload_caps,\
1237 mdev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], cap)
1239 #define MLX5_CAP_DEBUG(mdev, cap) \
1240 MLX5_GET(debug_cap, \
1241 mdev->hca_caps_cur[MLX5_CAP_DEBUG], cap)
1243 #define MLX5_CAP_DEBUG_MAX(mdev, cap) \
1244 MLX5_GET(debug_cap, \
1245 mdev->hca_caps_max[MLX5_CAP_DEBUG], cap)
1247 #define MLX5_CAP_QOS(mdev, cap) \
1249 mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
1251 #define MLX5_CAP_QOS_MAX(mdev, cap) \
1253 mdev->hca_caps_max[MLX5_CAP_QOS], cap)
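/*
 * Illustrative sketch (not part of the original header): the MLX5_CAP_*()
 * accessors wrap MLX5_GET() over the cached current/maximum capability
 * pages, so feature checks need no new firmware commands.  The field names
 * below come from mlx5_ifc.h and should be treated as assumptions:
 *
 *	if (MLX5_CAP_GEN(mdev, eth_net_offloads) &&
 *	    MLX5_CAP_ETH(mdev, lro_cap))
 *		... LRO can be offered by the Ethernet driver ...
 */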
1256 MLX5_CMD_STAT_OK = 0x0,
1257 MLX5_CMD_STAT_INT_ERR = 0x1,
1258 MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
1259 MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
1260 MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
1261 MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
1262 MLX5_CMD_STAT_RES_BUSY = 0x6,
1263 MLX5_CMD_STAT_LIM_ERR = 0x8,
1264 MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
1265 MLX5_CMD_STAT_IX_ERR = 0xa,
1266 MLX5_CMD_STAT_NO_RES_ERR = 0xf,
1267 MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
1268 MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
1269 MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
1270 MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
1271 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
1275 MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
1276 MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
1277 MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
1278 MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
1279 MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1280 MLX5_ETHERNET_DISCARD_COUNTERS_GROUP = 0x6,
1281 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
1282 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
1283 MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
1284 MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
1288 MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
1289 MLX5_PCIE_LANE_COUNTERS_GROUP = 0x1,
1290 MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
1294 MLX5_NUM_UUARS_PER_PAGE = MLX5_NON_FP_BF_REGS_PER_PAGE,
1295 MLX5_DEF_TOT_UUARS = 8 * MLX5_NUM_UUARS_PER_PAGE,
1299 NUM_DRIVER_UARS = 4,
1300 NUM_LOW_LAT_UUARS = 4,
1304 MLX5_CAP_PORT_TYPE_IB = 0x0,
1305 MLX5_CAP_PORT_TYPE_ETH = 0x1,
1309 MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_L2 = 0x0,
1310 MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_VPORT_CONFIG = 0x1,
1311 MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_NOT_REQUIRED = 0x2
1314 enum mlx5_inline_modes {
1315 MLX5_INLINE_MODE_NONE,
1316 MLX5_INLINE_MODE_L2,
1317 MLX5_INLINE_MODE_IP,
1318 MLX5_INLINE_MODE_TCP_UDP,
1322 MLX5_QUERY_VPORT_STATE_OUT_STATE_FOLLOW = 0x2,
1325 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1327 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
1329 return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
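/*
 * The reported pkey_sz encodes a power-of-two multiplier above the
 * 128-entry minimum: pkey_sz = 0 yields 128 entries and the largest legal
 * encoding, MLX5_MAX_LOG_PKEY_TABLE = 5, yields 128 << 5 = 4096 entries;
 * larger values fail the range check above.
 */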
1332 struct mlx5_ifc_mcia_reg_bits {
1339 u8 i2c_device_address[0x8];
1340 u8 page_number[0x8];
1341 u8 device_address[0x10];
1343 u8 reserved_2[0x10];
1346 u8 reserved_3[0x20];
1362 #define MLX5_CMD_OP_QUERY_EEPROM 0x93c
1364 struct mlx5_mini_cqe8 {
1366 __be32 rx_hash_result;
1379 MLX5_NO_INLINE_DATA,
1380 MLX5_INLINE_DATA32_SEG,
1381 MLX5_INLINE_DATA64_SEG,
1385 enum mlx5_exp_cqe_zip_recv_type {
1386 MLX5_CQE_FORMAT_HASH,
1387 MLX5_CQE_FORMAT_CSUM,
1390 #define MLX5E_CQE_FORMAT_MASK 0xc
1391 static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe)
1393 return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2;
1396 /* 8 regular priorities + 1 for multicast */
1397 #define MLX5_NUM_BYPASS_FTS 9
1399 #endif /* MLX5_DEVICE_H */