1 /*-
2  * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27
28 #ifndef MLX5_DEVICE_H
29 #define MLX5_DEVICE_H
30
31 #include <linux/types.h>
32 #include <rdma/ib_verbs.h>
33 #include <dev/mlx5/mlx5_ifc.h>
34
35 #define FW_INIT_TIMEOUT_MILI 2000
36 #define FW_INIT_WAIT_MS 2
37
38 #if defined(__LITTLE_ENDIAN)
39 #define MLX5_SET_HOST_ENDIANNESS        0
40 #elif defined(__BIG_ENDIAN)
41 #define MLX5_SET_HOST_ENDIANNESS        0x80
42 #else
43 #error Host endianness not defined
44 #endif
45
46 /* helper macros */
47 #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
48 #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
49 #define __mlx5_bit_off(typ, fld) __offsetof(struct mlx5_ifc_##typ##_bits, fld)
50 #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
51 #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
52 #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
53 #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
54 #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
55 #define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
56
57 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
58 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
59 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
60 #define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
61 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
62 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
63 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
64 #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
65
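/*
 * Illustrative sketch (not part of this header): the sizing and
 * addressing helpers above are typically used to declare a firmware
 * command buffer and locate a field inside it.  The structure and
 * field names below come from mlx5_ifc.h; the local variables are
 * hypothetical.
 *
 *	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
 *	size_t in_bytes = MLX5_ST_SZ_BYTES(query_hca_cap_in);
 *	void *opcode_ptr = MLX5_ADDR_OF(query_hca_cap_in, in, opcode);
 */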
66 /* insert a value into a struct */
67 #define MLX5_SET(typ, p, fld, v) do { \
68         BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
69         BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
70         *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
71         cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
72                      (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
73                      << __mlx5_dw_bit_off(typ, fld))); \
74 } while (0)
75
76 #define MLX5_SET_TO_ONES(typ, p, fld) do { \
77         BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
78         BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
79         *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
80         cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
81                      (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
82                      << __mlx5_dw_bit_off(typ, fld))); \
83 } while (0)
84
85 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
86 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
87 __mlx5_mask(typ, fld))
88
89 #define MLX5_GET_PR(typ, p, fld) ({ \
90         u32 ___t = MLX5_GET(typ, p, fld); \
91         pr_debug(#fld " = 0x%x\n", ___t); \
92         ___t; \
93 })
94
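/*
 * Illustrative sketch (not part of this header): MLX5_SET() masks and
 * shifts a host-endian value into the big-endian dword that holds the
 * field, and MLX5_GET() reverses the operation, so a round trip
 * returns the original value.  The BUILD_BUG_ON()s reject fields wider
 * than 32 bits, which must go through MLX5_SET64()/MLX5_GET64()
 * instead.  Structure and field names come from mlx5_ifc.h; the buffer
 * is hypothetical.
 *
 *	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
 *	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
 *	MLX5_GET(query_hca_cap_in, in, opcode) now returns
 *	MLX5_CMD_OP_QUERY_HCA_CAP.
 */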
95 #define MLX5_SET64(typ, p, fld, v) do { \
96         BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
97         BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
98         *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
99 } while (0)
100
101 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
102
103 #define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
104         __mlx5_64_off(typ, fld)))
105
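/*
 * Illustrative sketch (not part of this header): 64-bit fields are
 * written and read as whole big-endian quadwords; the BUILD_BUG_ON()s
 * in MLX5_SET64() require the field to be exactly 64 bits wide and
 * 64-bit aligned.  The names below stand in for any such field defined
 * in mlx5_ifc.h (here the memory key context); the variables are
 * hypothetical.
 *
 *	MLX5_SET64(mkc, mkc_ptr, start_addr, (u64)dma_addr);
 *	u64 addr = MLX5_GET64(mkc, mkc_ptr, start_addr);
 */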
106 #define MLX5_GET_BE(type_t, typ, p, fld) ({                               \
107                 type_t tmp;                                               \
108                 switch (sizeof(tmp)) {                                    \
109                 case sizeof(u8):                                          \
110                         tmp = (__force type_t)MLX5_GET(typ, p, fld);      \
111                         break;                                            \
112                 case sizeof(u16):                                         \
113                         tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
114                         break;                                            \
115                 case sizeof(u32):                                         \
116                         tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
117                         break;                                            \
118                 case sizeof(u64):                                         \
119                         tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
120                         break;                                            \
121                         }                                                 \
122                 tmp;                                                      \
123                 })
124
125 #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
126 #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
127 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
128 #define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
129                                     MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
130                                     MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
131
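/*
 * Arithmetic note: with the defaults above, MLX5_BY_PASS_NUM_PRIOS
 * evaluates to 8 + 8 + 1 = 17 bypass priorities.
 */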
132 enum {
133         MLX5_MAX_COMMANDS               = 32,
134         MLX5_CMD_DATA_BLOCK_SIZE        = 512,
135         MLX5_CMD_MBOX_SIZE              = 1024,
136         MLX5_PCI_CMD_XPORT              = 7,
137         MLX5_MKEY_BSF_OCTO_SIZE         = 4,
138         MLX5_MAX_PSVS                   = 4,
139 };
140
141 enum {
142         MLX5_EXTENDED_UD_AV             = 0x80000000,
143 };
144
145 enum {
146         MLX5_CQ_FLAGS_OI        = 2,
147 };
148
149 enum {
150         MLX5_STAT_RATE_OFFSET   = 5,
151 };
152
153 enum {
154         MLX5_INLINE_SEG = 0x80000000,
155 };
156
157 enum {
158         MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
159 };
160
161 enum {
162         MLX5_MIN_PKEY_TABLE_SIZE = 128,
163         MLX5_MAX_LOG_PKEY_TABLE  = 5,
164 };
165
166 enum {
167         MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
168 };
169
170 enum {
171         MLX5_PERM_LOCAL_READ    = 1 << 2,
172         MLX5_PERM_LOCAL_WRITE   = 1 << 3,
173         MLX5_PERM_REMOTE_READ   = 1 << 4,
174         MLX5_PERM_REMOTE_WRITE  = 1 << 5,
175         MLX5_PERM_ATOMIC        = 1 << 6,
176         MLX5_PERM_UMR_EN        = 1 << 7,
177 };
178
179 enum {
180         MLX5_PCIE_CTRL_SMALL_FENCE      = 1 << 0,
181         MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
182         MLX5_PCIE_CTRL_NO_SNOOP         = 1 << 3,
183         MLX5_PCIE_CTRL_TLP_PROCE_EN     = 1 << 6,
184         MLX5_PCIE_CTRL_TPH_MASK         = 3 << 4,
185 };
186
187 enum {
188         MLX5_MKEY_REMOTE_INVAL  = 1 << 24,
189         MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
190         MLX5_MKEY_BSF_EN        = 1 << 30,
191         MLX5_MKEY_LEN64         = 1 << 31,
192 };
193
194 enum {
195         MLX5_EN_RD      = (u64)1,
196         MLX5_EN_WR      = (u64)2
197 };
198
199 enum {
200         MLX5_BF_REGS_PER_PAGE           = 4,
201         MLX5_MAX_UAR_PAGES              = 1 << 8,
202         MLX5_NON_FP_BF_REGS_PER_PAGE    = 2,
203         MLX5_MAX_UUARS  = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
204 };
205
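/*
 * Arithmetic note: with the values above, MLX5_MAX_UUARS evaluates to
 * 256 UAR pages * 2 non-fast-path blue flame registers = 512.
 */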
206 enum {
207         MLX5_MKEY_MASK_LEN              = 1ull << 0,
208         MLX5_MKEY_MASK_PAGE_SIZE        = 1ull << 1,
209         MLX5_MKEY_MASK_START_ADDR       = 1ull << 6,
210         MLX5_MKEY_MASK_PD               = 1ull << 7,
211         MLX5_MKEY_MASK_EN_RINVAL        = 1ull << 8,
212         MLX5_MKEY_MASK_EN_SIGERR        = 1ull << 9,
213         MLX5_MKEY_MASK_BSF_EN           = 1ull << 12,
214         MLX5_MKEY_MASK_KEY              = 1ull << 13,
215         MLX5_MKEY_MASK_QPN              = 1ull << 14,
216         MLX5_MKEY_MASK_LR               = 1ull << 17,
217         MLX5_MKEY_MASK_LW               = 1ull << 18,
218         MLX5_MKEY_MASK_RR               = 1ull << 19,
219         MLX5_MKEY_MASK_RW               = 1ull << 20,
220         MLX5_MKEY_MASK_A                = 1ull << 21,
221         MLX5_MKEY_MASK_SMALL_FENCE      = 1ull << 23,
222         MLX5_MKEY_MASK_FREE             = 1ull << 29,
223 };
224
225 enum {
226         MLX5_UMR_TRANSLATION_OFFSET_EN  = (1 << 4),
227
228         MLX5_UMR_CHECK_NOT_FREE         = (1 << 5),
229         MLX5_UMR_CHECK_FREE             = (2 << 5),
230
231         MLX5_UMR_INLINE                 = (1 << 7),
232 };
233
234 #define MLX5_UMR_MTT_ALIGNMENT 0x40
235 #define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
236 #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
237
238 enum {
239         MLX5_EVENT_QUEUE_TYPE_QP = 0,
240         MLX5_EVENT_QUEUE_TYPE_RQ = 1,
241         MLX5_EVENT_QUEUE_TYPE_SQ = 2,
242 };
243
244 enum {
245         MLX5_PORT_CHANGE_SUBTYPE_DOWN           = 1,
246         MLX5_PORT_CHANGE_SUBTYPE_ACTIVE         = 4,
247         MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED    = 5,
248         MLX5_PORT_CHANGE_SUBTYPE_LID            = 6,
249         MLX5_PORT_CHANGE_SUBTYPE_PKEY           = 7,
250         MLX5_PORT_CHANGE_SUBTYPE_GUID           = 8,
251         MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG   = 9,
252 };
253
254 enum {
255         MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX = 1,
256         MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE,
257         MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE,
258         MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE,
259         MLX5_MAX_INLINE_RECEIVE_SIZE            = 64
260 };
261
262 enum {
263         MLX5_DEV_CAP_FLAG_XRC           = 1LL <<  3,
264         MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL <<  8,
265         MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL <<  9,
266         MLX5_DEV_CAP_FLAG_APM           = 1LL << 17,
267         MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD  = 1LL << 21,
268         MLX5_DEV_CAP_FLAG_BLOCK_MCAST   = 1LL << 23,
269         MLX5_DEV_CAP_FLAG_CQ_MODER      = 1LL << 29,
270         MLX5_DEV_CAP_FLAG_RESIZE_CQ     = 1LL << 30,
271         MLX5_DEV_CAP_FLAG_ATOMIC        = 1LL << 33,
272         MLX5_DEV_CAP_FLAG_ROCE          = 1LL << 34,
273         MLX5_DEV_CAP_FLAG_DCT           = 1LL << 37,
274         MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
275         MLX5_DEV_CAP_FLAG_CMDIF_CSUM    = 3LL << 46,
276         MLX5_DEV_CAP_FLAG_DRAIN_SIGERR  = 1LL << 48,
277 };
278
279 enum {
280         MLX5_ROCE_VERSION_1             = 0,
281         MLX5_ROCE_VERSION_1_5           = 1,
282         MLX5_ROCE_VERSION_2             = 2,
283 };
284
285 enum {
286         MLX5_ROCE_VERSION_1_CAP         = 1 << MLX5_ROCE_VERSION_1,
287         MLX5_ROCE_VERSION_1_5_CAP       = 1 << MLX5_ROCE_VERSION_1_5,
288         MLX5_ROCE_VERSION_2_CAP         = 1 << MLX5_ROCE_VERSION_2,
289 };
290
291 enum {
292         MLX5_ROCE_L3_TYPE_IPV4          = 0,
293         MLX5_ROCE_L3_TYPE_IPV6          = 1,
294 };
295
296 enum {
297         MLX5_ROCE_L3_TYPE_IPV4_CAP      = 1 << 1,
298         MLX5_ROCE_L3_TYPE_IPV6_CAP      = 1 << 2,
299 };
300
301 enum {
302         MLX5_OPCODE_NOP                 = 0x00,
303         MLX5_OPCODE_SEND_INVAL          = 0x01,
304         MLX5_OPCODE_RDMA_WRITE          = 0x08,
305         MLX5_OPCODE_RDMA_WRITE_IMM      = 0x09,
306         MLX5_OPCODE_SEND                = 0x0a,
307         MLX5_OPCODE_SEND_IMM            = 0x0b,
308         MLX5_OPCODE_LSO                 = 0x0e,
309         MLX5_OPCODE_RDMA_READ           = 0x10,
310         MLX5_OPCODE_ATOMIC_CS           = 0x11,
311         MLX5_OPCODE_ATOMIC_FA           = 0x12,
312         MLX5_OPCODE_ATOMIC_MASKED_CS    = 0x14,
313         MLX5_OPCODE_ATOMIC_MASKED_FA    = 0x15,
314         MLX5_OPCODE_BIND_MW             = 0x18,
315         MLX5_OPCODE_CONFIG_CMD          = 0x1f,
316
317         MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
318         MLX5_RECV_OPCODE_SEND           = 0x01,
319         MLX5_RECV_OPCODE_SEND_IMM       = 0x02,
320         MLX5_RECV_OPCODE_SEND_INVAL     = 0x03,
321
322         MLX5_CQE_OPCODE_ERROR           = 0x1e,
323         MLX5_CQE_OPCODE_RESIZE          = 0x16,
324
325         MLX5_OPCODE_SET_PSV             = 0x20,
326         MLX5_OPCODE_GET_PSV             = 0x21,
327         MLX5_OPCODE_CHECK_PSV           = 0x22,
328         MLX5_OPCODE_RGET_PSV            = 0x26,
329         MLX5_OPCODE_RCHECK_PSV          = 0x27,
330
331         MLX5_OPCODE_UMR                 = 0x25,
332
333         MLX5_OPCODE_SIGNATURE_CANCELED  = (1 << 15),
334 };
335
336 enum {
337         MLX5_SET_PORT_RESET_QKEY        = 0,
338         MLX5_SET_PORT_GUID0             = 16,
339         MLX5_SET_PORT_NODE_GUID         = 17,
340         MLX5_SET_PORT_SYS_GUID          = 18,
341         MLX5_SET_PORT_GID_TABLE         = 19,
342         MLX5_SET_PORT_PKEY_TABLE        = 20,
343 };
344
345 enum {
346         MLX5_MAX_PAGE_SHIFT             = 31
347 };
348
349 enum {
350         MLX5_ADAPTER_PAGE_SHIFT         = 12,
351         MLX5_ADAPTER_PAGE_SIZE          = 1 << MLX5_ADAPTER_PAGE_SHIFT,
352 };
353
354 enum {
355         MLX5_CAP_OFF_CMDIF_CSUM         = 46,
356 };
357
358 enum {
359         /*
360          * The maximum WQE size for an RDMA read is 512 bytes, so this
361          * limits our max_sge_rd, as the WQE needs to fit:
362          * - ctrl segment (16 bytes)
363          * - rdma segment (16 bytes)
364          * - scatter elements (16 bytes each)
365          */
366         MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
367 };
368
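/*
 * Arithmetic note: given the 512-byte WQE limit described above,
 * MLX5_MAX_SGE_RD evaluates to (512 - 16 - 16) / 16 = 30 scatter
 * entries per RDMA read work request.
 */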
369 struct mlx5_inbox_hdr {
370         __be16          opcode;
371         u8              rsvd[4];
372         __be16          opmod;
373 };
374
375 struct mlx5_outbox_hdr {
376         u8              status;
377         u8              rsvd[3];
378         __be32          syndrome;
379 };
380
381 struct mlx5_cmd_set_dc_cnak_mbox_in {
382         struct mlx5_inbox_hdr   hdr;
383         u8                      enable;
384         u8                      reserved[47];
385         __be64                  pa;
386 };
387
388 struct mlx5_cmd_set_dc_cnak_mbox_out {
389         struct mlx5_outbox_hdr  hdr;
390         u8                      rsvd[8];
391 };
392
393 struct mlx5_cmd_layout {
394         u8              type;
395         u8              rsvd0[3];
396         __be32          inlen;
397         __be64          in_ptr;
398         __be32          in[4];
399         __be32          out[4];
400         __be64          out_ptr;
401         __be32          outlen;
402         u8              token;
403         u8              sig;
404         u8              rsvd1;
405         u8              status_own;
406 };
407
408
409 struct mlx5_health_buffer {
410         __be32          assert_var[5];
411         __be32          rsvd0[3];
412         __be32          assert_exit_ptr;
413         __be32          assert_callra;
414         __be32          rsvd1[2];
415         __be32          fw_ver;
416         __be32          hw_id;
417         __be32          rsvd2;
418         u8              irisc_index;
419         u8              synd;
420         __be16          ext_sync;
421 };
422
423 struct mlx5_init_seg {
424         __be32                  fw_rev;
425         __be32                  cmdif_rev_fw_sub;
426         __be32                  rsvd0[2];
427         __be32                  cmdq_addr_h;
428         __be32                  cmdq_addr_l_sz;
429         __be32                  cmd_dbell;
430         __be32                  rsvd1[120];
431         __be32                  initializing;
432         struct mlx5_health_buffer  health;
433         __be32                  rsvd2[880];
434         __be32                  internal_timer_h;
435         __be32                  internal_timer_l;
436         __be32                  rsvd3[2];
437         __be32                  health_counter;
438         __be32                  rsvd4[1019];
439         __be64                  ieee1588_clk;
440         __be32                  ieee1588_clk_type;
441         __be32                  clr_intx;
442 };
443
444 struct mlx5_eqe_comp {
445         __be32  reserved[6];
446         __be32  cqn;
447 };
448
449 struct mlx5_eqe_qp_srq {
450         __be32  reserved[6];
451         __be32  qp_srq_n;
452 };
453
454 struct mlx5_eqe_cq_err {
455         __be32  cqn;
456         u8      reserved1[7];
457         u8      syndrome;
458 };
459
460 struct mlx5_eqe_port_state {
461         u8      reserved0[8];
462         u8      port;
463 };
464
465 struct mlx5_eqe_gpio {
466         __be32  reserved0[2];
467         __be64  gpio_event;
468 };
469
470 struct mlx5_eqe_congestion {
471         u8      type;
472         u8      rsvd0;
473         u8      congestion_level;
474 };
475
476 struct mlx5_eqe_stall_vl {
477         u8      rsvd0[3];
478         u8      port_vl;
479 };
480
481 struct mlx5_eqe_cmd {
482         __be32  vector;
483         __be32  rsvd[6];
484 };
485
486 struct mlx5_eqe_page_req {
487         u8              rsvd0[2];
488         __be16          func_id;
489         __be32          num_pages;
490         __be32          rsvd1[5];
491 };
492
493 struct mlx5_eqe_vport_change {
494         u8              rsvd0[2];
495         __be16          vport_num;
496         __be32          rsvd1[6];
497 };
498
499
500 #define PORT_MODULE_EVENT_MODULE_STATUS_MASK  0xF
501 #define PORT_MODULE_EVENT_ERROR_TYPE_MASK     0xF
502
503 enum {
504         MLX5_MODULE_STATUS_PLUGGED    = 0x1,
505         MLX5_MODULE_STATUS_UNPLUGGED  = 0x2,
506         MLX5_MODULE_STATUS_ERROR      = 0x3,
507 };
508
509 enum {
510         MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED                 = 0x0,
511         MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE  = 0x1,
512         MLX5_MODULE_EVENT_ERROR_BUS_STUCK                             = 0x2,
513         MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT               = 0x3,
514         MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST              = 0x4,
515         MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER                    = 0x5,
516         MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE                      = 0x6,
517         MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED                      = 0x7,
518 };
519
520 struct mlx5_eqe_port_module_event {
521         u8        rsvd0;
522         u8        module;
523         u8        rsvd1;
524         u8        module_status;
525         u8        rsvd2[2];
526         u8        error_type;
527 };
528
529 struct mlx5_eqe_general_notification_event {
530         u32       rq_user_index_delay_drop;
531         u32       rsvd0[6];
532 };
533
534 union ev_data {
535         __be32                          raw[7];
536         struct mlx5_eqe_cmd             cmd;
537         struct mlx5_eqe_comp            comp;
538         struct mlx5_eqe_qp_srq          qp_srq;
539         struct mlx5_eqe_cq_err          cq_err;
540         struct mlx5_eqe_port_state      port;
541         struct mlx5_eqe_gpio            gpio;
542         struct mlx5_eqe_congestion      cong;
543         struct mlx5_eqe_stall_vl        stall_vl;
544         struct mlx5_eqe_page_req        req_pages;
545         struct mlx5_eqe_port_module_event port_module_event;
546         struct mlx5_eqe_vport_change    vport_change;
547         struct mlx5_eqe_general_notification_event general_notifications;
548 } __packed;
549
550 struct mlx5_eqe {
551         u8              rsvd0;
552         u8              type;
553         u8              rsvd1;
554         u8              sub_type;
555         __be32          rsvd2[7];
556         union ev_data   data;
557         __be16          rsvd3;
558         u8              signature;
559         u8              owner;
560 } __packed;
561
562 struct mlx5_cmd_prot_block {
563         u8              data[MLX5_CMD_DATA_BLOCK_SIZE];
564         u8              rsvd0[48];
565         __be64          next;
566         __be32          block_num;
567         u8              rsvd1;
568         u8              token;
569         u8              ctrl_sig;
570         u8              sig;
571 };
572
573 #define MLX5_NUM_CMDS_IN_ADAPTER_PAGE \
574         (MLX5_ADAPTER_PAGE_SIZE / MLX5_CMD_MBOX_SIZE)
575 CTASSERT(MLX5_CMD_MBOX_SIZE >= sizeof(struct mlx5_cmd_prot_block));
576 CTASSERT(MLX5_CMD_MBOX_SIZE <= MLX5_ADAPTER_PAGE_SIZE);
577
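/*
 * Arithmetic note: with MLX5_ADAPTER_PAGE_SIZE = 4096 and
 * MLX5_CMD_MBOX_SIZE = 1024, MLX5_NUM_CMDS_IN_ADAPTER_PAGE evaluates
 * to 4 command mailboxes per adapter page; the CTASSERT()s above
 * guarantee that a mailbox can hold a struct mlx5_cmd_prot_block and
 * still fits within one adapter page.
 */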
578 enum {
579         MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
580 };
581
582 struct mlx5_err_cqe {
583         u8      rsvd0[32];
584         __be32  srqn;
585         u8      rsvd1[18];
586         u8      vendor_err_synd;
587         u8      syndrome;
588         __be32  s_wqe_opcode_qpn;
589         __be16  wqe_counter;
590         u8      signature;
591         u8      op_own;
592 };
593
594 struct mlx5_cqe64 {
595         u8              tunneled_etc;
596         u8              rsvd0[3];
597         u8              lro_tcppsh_abort_dupack;
598         u8              lro_min_ttl;
599         __be16          lro_tcp_win;
600         __be32          lro_ack_seq_num;
601         __be32          rss_hash_result;
602         u8              rss_hash_type;
603         u8              ml_path;
604         u8              rsvd20[2];
605         __be16          check_sum;
606         __be16          slid;
607         __be32          flags_rqpn;
608         u8              hds_ip_ext;
609         u8              l4_hdr_type_etc;
610         __be16          vlan_info;
611         __be32          srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
612         __be32          imm_inval_pkey;
613         u8              rsvd40[4];
614         __be32          byte_cnt;
615         __be64          timestamp;
616         __be32          sop_drop_qpn;
617         __be16          wqe_counter;
618         u8              signature;
619         u8              op_own;
620 };
621
622 #define MLX5_CQE_TSTMP_PTP      (1ULL << 63)
623
624 static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe)
625 {
626         return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;
627 }
628
629 static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
630 {
631         return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
632 }
633
634 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
635 {
636         return (cqe->l4_hdr_type_etc >> 4) & 0x7;
637 }
638
639 static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe)
640 {
641         return be16_to_cpu(cqe->vlan_info) & 0xfff;
642 }
643
644 static inline void get_cqe_smac(struct mlx5_cqe64 *cqe, u8 *smac)
645 {
646         memcpy(smac, &cqe->rss_hash_type, 4);
647         memcpy(smac + 4, &cqe->slid, 2);
648 }
649
650 static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
651 {
652         return cqe->l4_hdr_type_etc & 0x1;
653 }
654
655 static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
656 {
657         return cqe->tunneled_etc & 0x1;
658 }
659
660 enum {
661         CQE_L4_HDR_TYPE_NONE                    = 0x0,
662         CQE_L4_HDR_TYPE_TCP_NO_ACK              = 0x1,
663         CQE_L4_HDR_TYPE_UDP                     = 0x2,
664         CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA         = 0x3,
665         CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA        = 0x4,
666 };
667
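/*
 * Illustrative sketch (not part of this header) of how a receive
 * completion handler might use the CQE helpers above; "cqe" is a
 * hypothetical pointer to a completed struct mlx5_cqe64.
 *
 *	u16 vlan_id = cqe_has_vlan(cqe) ? get_cqe_vlan(cqe) : 0;
 *	u8 l4_type = get_cqe_l4_hdr_type(cqe);
 *	l4_type is one of the CQE_L4_HDR_TYPE_* values listed above.
 */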
668 enum {
669         /* source L3 hash types */
670         CQE_RSS_SRC_HTYPE_IP    = 0x3 << 0,
671         CQE_RSS_SRC_HTYPE_IPV4  = 0x1 << 0,
672         CQE_RSS_SRC_HTYPE_IPV6  = 0x2 << 0,
673
674         /* destination L3 hash types */
675         CQE_RSS_DST_HTYPE_IP    = 0x3 << 2,
676         CQE_RSS_DST_HTYPE_IPV4  = 0x1 << 2,
677         CQE_RSS_DST_HTYPE_IPV6  = 0x2 << 2,
678
679         /* source L4 hash types */
680         CQE_RSS_SRC_HTYPE_L4    = 0x3 << 4,
681         CQE_RSS_SRC_HTYPE_TCP   = 0x1 << 4,
682         CQE_RSS_SRC_HTYPE_UDP   = 0x2 << 4,
683         CQE_RSS_SRC_HTYPE_IPSEC = 0x3 << 4,
684
685         /* destination L4 hash types */
686         CQE_RSS_DST_HTYPE_L4    = 0x3 << 6,
687         CQE_RSS_DST_HTYPE_TCP   = 0x1 << 6,
688         CQE_RSS_DST_HTYPE_UDP   = 0x2 << 6,
689         CQE_RSS_DST_HTYPE_IPSEC = 0x3 << 6,
690 };
691
692 enum {
693         MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH        = 0x0,
694         MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6       = 0x1,
695         MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4       = 0x2,
696 };
697
698 enum {
699         CQE_L2_OK       = 1 << 0,
700         CQE_L3_OK       = 1 << 1,
701         CQE_L4_OK       = 1 << 2,
702 };
703
704 struct mlx5_sig_err_cqe {
705         u8              rsvd0[16];
706         __be32          expected_trans_sig;
707         __be32          actual_trans_sig;
708         __be32          expected_reftag;
709         __be32          actual_reftag;
710         __be16          syndrome;
711         u8              rsvd22[2];
712         __be32          mkey;
713         __be64          err_offset;
714         u8              rsvd30[8];
715         __be32          qpn;
716         u8              rsvd38[2];
717         u8              signature;
718         u8              op_own;
719 };
720
721 struct mlx5_wqe_srq_next_seg {
722         u8                      rsvd0[2];
723         __be16                  next_wqe_index;
724         u8                      signature;
725         u8                      rsvd1[11];
726 };
727
728 union mlx5_ext_cqe {
729         struct ib_grh   grh;
730         u8              inl[64];
731 };
732
733 struct mlx5_cqe128 {
734         union mlx5_ext_cqe      inl_grh;
735         struct mlx5_cqe64       cqe64;
736 };
737
738 struct mlx5_srq_ctx {
739         u8                      state_log_sz;
740         u8                      rsvd0[3];
741         __be32                  flags_xrcd;
742         __be32                  pgoff_cqn;
743         u8                      rsvd1[4];
744         u8                      log_pg_sz;
745         u8                      rsvd2[7];
746         __be32                  pd;
747         __be16                  lwm;
748         __be16                  wqe_cnt;
749         u8                      rsvd3[8];
750         __be64                  db_record;
751 };
752
753 struct mlx5_create_srq_mbox_in {
754         struct mlx5_inbox_hdr   hdr;
755         __be32                  input_srqn;
756         u8                      rsvd0[4];
757         struct mlx5_srq_ctx     ctx;
758         u8                      rsvd1[208];
759         __be64                  pas[0];
760 };
761
762 struct mlx5_create_srq_mbox_out {
763         struct mlx5_outbox_hdr  hdr;
764         __be32                  srqn;
765         u8                      rsvd[4];
766 };
767
768 struct mlx5_destroy_srq_mbox_in {
769         struct mlx5_inbox_hdr   hdr;
770         __be32                  srqn;
771         u8                      rsvd[4];
772 };
773
774 struct mlx5_destroy_srq_mbox_out {
775         struct mlx5_outbox_hdr  hdr;
776         u8                      rsvd[8];
777 };
778
779 struct mlx5_query_srq_mbox_in {
780         struct mlx5_inbox_hdr   hdr;
781         __be32                  srqn;
782         u8                      rsvd0[4];
783 };
784
785 struct mlx5_query_srq_mbox_out {
786         struct mlx5_outbox_hdr  hdr;
787         u8                      rsvd0[8];
788         struct mlx5_srq_ctx     ctx;
789         u8                      rsvd1[32];
790         __be64                  pas[0];
791 };
792
793 struct mlx5_arm_srq_mbox_in {
794         struct mlx5_inbox_hdr   hdr;
795         __be32                  srqn;
796         __be16                  rsvd;
797         __be16                  lwm;
798 };
799
800 struct mlx5_arm_srq_mbox_out {
801         struct mlx5_outbox_hdr  hdr;
802         u8                      rsvd[8];
803 };
804
805 struct mlx5_cq_context {
806         u8                      status;
807         u8                      cqe_sz_flags;
808         u8                      st;
809         u8                      rsvd3;
810         u8                      rsvd4[6];
811         __be16                  page_offset;
812         __be32                  log_sz_usr_page;
813         __be16                  cq_period;
814         __be16                  cq_max_count;
815         __be16                  rsvd20;
816         __be16                  c_eqn;
817         u8                      log_pg_sz;
818         u8                      rsvd25[7];
819         __be32                  last_notified_index;
820         __be32                  solicit_producer_index;
821         __be32                  consumer_counter;
822         __be32                  producer_counter;
823         u8                      rsvd48[8];
824         __be64                  db_record_addr;
825 };
826
827 struct mlx5_create_cq_mbox_in {
828         struct mlx5_inbox_hdr   hdr;
829         __be32                  input_cqn;
830         u8                      rsvdx[4];
831         struct mlx5_cq_context  ctx;
832         u8                      rsvd6[192];
833         __be64                  pas[0];
834 };
835
836 struct mlx5_create_cq_mbox_out {
837         struct mlx5_outbox_hdr  hdr;
838         __be32                  cqn;
839         u8                      rsvd0[4];
840 };
841
842 struct mlx5_destroy_cq_mbox_in {
843         struct mlx5_inbox_hdr   hdr;
844         __be32                  cqn;
845         u8                      rsvd0[4];
846 };
847
848 struct mlx5_destroy_cq_mbox_out {
849         struct mlx5_outbox_hdr  hdr;
850         u8                      rsvd0[8];
851 };
852
853 struct mlx5_query_cq_mbox_in {
854         struct mlx5_inbox_hdr   hdr;
855         __be32                  cqn;
856         u8                      rsvd0[4];
857 };
858
859 struct mlx5_query_cq_mbox_out {
860         struct mlx5_outbox_hdr  hdr;
861         u8                      rsvd0[8];
862         struct mlx5_cq_context  ctx;
863         u8                      rsvd6[16];
864         __be64                  pas[0];
865 };
866
867 struct mlx5_modify_cq_mbox_in {
868         struct mlx5_inbox_hdr   hdr;
869         __be32                  cqn;
870         __be32                  field_select;
871         struct mlx5_cq_context  ctx;
872         u8                      rsvd[192];
873         __be64                  pas[0];
874 };
875
876 struct mlx5_modify_cq_mbox_out {
877         struct mlx5_outbox_hdr  hdr;
878         u8                      rsvd[8];
879 };
880
881 struct mlx5_eq_context {
882         u8                      status;
883         u8                      ec_oi;
884         u8                      st;
885         u8                      rsvd2[7];
886         __be16                  page_pffset;
887         __be32                  log_sz_usr_page;
888         u8                      rsvd3[7];
889         u8                      intr;
890         u8                      log_page_size;
891         u8                      rsvd4[15];
892         __be32                  consumer_counter;
893         __be32                  produser_counter;
894         u8                      rsvd5[16];
895 };
896
897 struct mlx5_create_eq_mbox_in {
898         struct mlx5_inbox_hdr   hdr;
899         u8                      rsvd0[3];
900         u8                      input_eqn;
901         u8                      rsvd1[4];
902         struct mlx5_eq_context  ctx;
903         u8                      rsvd2[8];
904         __be64                  events_mask;
905         u8                      rsvd3[176];
906         __be64                  pas[0];
907 };
908
909 struct mlx5_create_eq_mbox_out {
910         struct mlx5_outbox_hdr  hdr;
911         u8                      rsvd0[3];
912         u8                      eq_number;
913         u8                      rsvd1[4];
914 };
915
916 struct mlx5_map_eq_mbox_in {
917         struct mlx5_inbox_hdr   hdr;
918         __be64                  mask;
919         u8                      mu;
920         u8                      rsvd0[2];
921         u8                      eqn;
922         u8                      rsvd1[24];
923 };
924
925 struct mlx5_map_eq_mbox_out {
926         struct mlx5_outbox_hdr  hdr;
927         u8                      rsvd[8];
928 };
929
930 struct mlx5_query_eq_mbox_in {
931         struct mlx5_inbox_hdr   hdr;
932         u8                      rsvd0[3];
933         u8                      eqn;
934         u8                      rsvd1[4];
935 };
936
937 struct mlx5_query_eq_mbox_out {
938         struct mlx5_outbox_hdr  hdr;
939         u8                      rsvd[8];
940         struct mlx5_eq_context  ctx;
941 };
942
943 enum {
944         MLX5_MKEY_STATUS_FREE = 1 << 6,
945 };
946
947 struct mlx5_mkey_seg {
948         /* This is a two-bit field occupying bits 31-30.
949          * Bit 31 is always 0;
950          * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do not have translation.
951          */
952         u8              status;
953         u8              pcie_control;
954         u8              flags;
955         u8              version;
956         __be32          qpn_mkey7_0;
957         u8              rsvd1[4];
958         __be32          flags_pd;
959         __be64          start_addr;
960         __be64          len;
961         __be32          bsfs_octo_size;
962         u8              rsvd2[16];
963         __be32          xlt_oct_size;
964         u8              rsvd3[3];
965         u8              log2_page_size;
966         u8              rsvd4[4];
967 };
968
969 struct mlx5_query_special_ctxs_mbox_in {
970         struct mlx5_inbox_hdr   hdr;
971         u8                      rsvd[8];
972 };
973
974 struct mlx5_query_special_ctxs_mbox_out {
975         struct mlx5_outbox_hdr  hdr;
976         __be32                  dump_fill_mkey;
977         __be32                  reserved_lkey;
978 };
979
980 struct mlx5_create_mkey_mbox_in {
981         struct mlx5_inbox_hdr   hdr;
982         __be32                  input_mkey_index;
983         __be32                  flags;
984         struct mlx5_mkey_seg    seg;
985         u8                      rsvd1[16];
986         __be32                  xlat_oct_act_size;
987         __be32                  rsvd2;
988         u8                      rsvd3[168];
989         __be64                  pas[0];
990 };
991
992 struct mlx5_create_mkey_mbox_out {
993         struct mlx5_outbox_hdr  hdr;
994         __be32                  mkey;
995         u8                      rsvd[4];
996 };
997
998 struct mlx5_query_mkey_mbox_in {
999         struct mlx5_inbox_hdr   hdr;
1000         __be32                  mkey;
1001 };
1002
1003 struct mlx5_query_mkey_mbox_out {
1004         struct mlx5_outbox_hdr  hdr;
1005         __be64                  pas[0];
1006 };
1007
1008 struct mlx5_modify_mkey_mbox_in {
1009         struct mlx5_inbox_hdr   hdr;
1010         __be32                  mkey;
1011         __be64                  pas[0];
1012 };
1013
1014 struct mlx5_modify_mkey_mbox_out {
1015         struct mlx5_outbox_hdr  hdr;
1016         u8                      rsvd[8];
1017 };
1018
1019 struct mlx5_dump_mkey_mbox_in {
1020         struct mlx5_inbox_hdr   hdr;
1021 };
1022
1023 struct mlx5_dump_mkey_mbox_out {
1024         struct mlx5_outbox_hdr  hdr;
1025         __be32                  mkey;
1026 };
1027
1028 struct mlx5_mad_ifc_mbox_in {
1029         struct mlx5_inbox_hdr   hdr;
1030         __be16                  remote_lid;
1031         u8                      rsvd0;
1032         u8                      port;
1033         u8                      rsvd1[4];
1034         u8                      data[256];
1035 };
1036
1037 struct mlx5_mad_ifc_mbox_out {
1038         struct mlx5_outbox_hdr  hdr;
1039         u8                      rsvd[8];
1040         u8                      data[256];
1041 };
1042
1043 struct mlx5_access_reg_mbox_in {
1044         struct mlx5_inbox_hdr           hdr;
1045         u8                              rsvd0[2];
1046         __be16                          register_id;
1047         __be32                          arg;
1048         __be32                          data[0];
1049 };
1050
1051 struct mlx5_access_reg_mbox_out {
1052         struct mlx5_outbox_hdr          hdr;
1053         u8                              rsvd[8];
1054         __be32                          data[0];
1055 };
1056
1057 #define MLX5_ATTR_EXTENDED_PORT_INFO    cpu_to_be16(0xff90)
1058
1059 enum {
1060         MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO        = 1 <<  0
1061 };
1062
1063 struct mlx5_allocate_psv_in {
1064         struct mlx5_inbox_hdr   hdr;
1065         __be32                  npsv_pd;
1066         __be32                  rsvd_psv0;
1067 };
1068
1069 struct mlx5_allocate_psv_out {
1070         struct mlx5_outbox_hdr  hdr;
1071         u8                      rsvd[8];
1072         __be32                  psv_idx[4];
1073 };
1074
1075 struct mlx5_destroy_psv_in {
1076         struct mlx5_inbox_hdr   hdr;
1077         __be32                  psv_number;
1078         u8                      rsvd[4];
1079 };
1080
1081 struct mlx5_destroy_psv_out {
1082         struct mlx5_outbox_hdr  hdr;
1083         u8                      rsvd[8];
1084 };
1085
1086 static inline int mlx5_host_is_le(void)
1087 {
1088 #if defined(__LITTLE_ENDIAN)
1089         return 1;
1090 #elif defined(__BIG_ENDIAN)
1091         return 0;
1092 #else
1093 #error Host endianness not defined
1094 #endif
1095 }
1096
1097 #define MLX5_CMD_OP_MAX 0x939
1098
1099 enum {
1100         VPORT_STATE_DOWN                = 0x0,
1101         VPORT_STATE_UP                  = 0x1,
1102 };
1103
1104 enum {
1105         MLX5_L3_PROT_TYPE_IPV4          = 0,
1106         MLX5_L3_PROT_TYPE_IPV6          = 1,
1107 };
1108
1109 enum {
1110         MLX5_L4_PROT_TYPE_TCP           = 0,
1111         MLX5_L4_PROT_TYPE_UDP           = 1,
1112 };
1113
1114 enum {
1115         MLX5_HASH_FIELD_SEL_SRC_IP      = 1 << 0,
1116         MLX5_HASH_FIELD_SEL_DST_IP      = 1 << 1,
1117         MLX5_HASH_FIELD_SEL_L4_SPORT    = 1 << 2,
1118         MLX5_HASH_FIELD_SEL_L4_DPORT    = 1 << 3,
1119         MLX5_HASH_FIELD_SEL_IPSEC_SPI   = 1 << 4,
1120 };
1121
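/*
 * Illustrative sketch (not part of this header): the selector bits
 * above are OR'ed together to describe which packet fields feed the
 * RSS hash; a conventional 4-tuple TCP/UDP hash would select
 *
 *	MLX5_HASH_FIELD_SEL_SRC_IP | MLX5_HASH_FIELD_SEL_DST_IP |
 *	MLX5_HASH_FIELD_SEL_L4_SPORT | MLX5_HASH_FIELD_SEL_L4_DPORT
 */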
1122 enum {
1123         MLX5_MATCH_OUTER_HEADERS        = 1 << 0,
1124         MLX5_MATCH_MISC_PARAMETERS      = 1 << 1,
1125         MLX5_MATCH_INNER_HEADERS        = 1 << 2,
1126
1127 };
1128
1129 enum {
1130         MLX5_FLOW_TABLE_TYPE_NIC_RCV     = 0,
1131         MLX5_FLOW_TABLE_TYPE_EGRESS_ACL  = 2,
1132         MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3,
1133         MLX5_FLOW_TABLE_TYPE_ESWITCH     = 4,
1134         MLX5_FLOW_TABLE_TYPE_SNIFFER_RX  = 5,
1135         MLX5_FLOW_TABLE_TYPE_SNIFFER_TX  = 6,
1136         MLX5_FLOW_TABLE_TYPE_NIC_RX_RDMA = 7,
1137 };
1138
1139 enum {
1140         MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE       = 0,
1141         MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_IF_NO_VLAN = 1,
1142         MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_OVERWRITE  = 2
1143 };
1144
1145 enum {
1146         MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_STRIP  = 1 << 0,
1147         MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP  = 1 << 1,
1148         MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_INSERT = 1 << 2,
1149         MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT = 1 << 3
1150 };
1151
1152 enum {
1153         MLX5_UC_ADDR_CHANGE = (1 << 0),
1154         MLX5_MC_ADDR_CHANGE = (1 << 1),
1155         MLX5_VLAN_CHANGE    = (1 << 2),
1156         MLX5_PROMISC_CHANGE = (1 << 3),
1157         MLX5_MTU_CHANGE     = (1 << 4),
1158 };
1159
1160 enum mlx5_list_type {
1161         MLX5_NIC_VPORT_LIST_TYPE_UC   = 0x0,
1162         MLX5_NIC_VPORT_LIST_TYPE_MC   = 0x1,
1163         MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2,
1164 };
1165
1166 enum {
1167         MLX5_ESW_VPORT_ADMIN_STATE_DOWN  = 0x0,
1168         MLX5_ESW_VPORT_ADMIN_STATE_UP    = 0x1,
1169         MLX5_ESW_VPORT_ADMIN_STATE_AUTO  = 0x2,
1170 };
1171
1172 /* MLX5 DEV CAPs */
1173
1174 /* TODO: EAT.ME */
1175 enum mlx5_cap_mode {
1176         HCA_CAP_OPMOD_GET_MAX   = 0,
1177         HCA_CAP_OPMOD_GET_CUR   = 1,
1178 };
1179
1180 enum mlx5_cap_type {
1181         MLX5_CAP_GENERAL = 0,
1182         MLX5_CAP_ETHERNET_OFFLOADS,
1183         MLX5_CAP_ODP,
1184         MLX5_CAP_ATOMIC,
1185         MLX5_CAP_ROCE,
1186         MLX5_CAP_IPOIB_OFFLOADS,
1187         MLX5_CAP_EOIB_OFFLOADS,
1188         MLX5_CAP_FLOW_TABLE,
1189         MLX5_CAP_ESWITCH_FLOW_TABLE,
1190         MLX5_CAP_ESWITCH,
1191         MLX5_CAP_SNAPSHOT,
1192         MLX5_CAP_VECTOR_CALC,
1193         MLX5_CAP_QOS,
1194         MLX5_CAP_DEBUG,
1195         /* NUM OF CAP Types */
1196         MLX5_CAP_NUM
1197 };
1198
1199 /* GET Dev Caps macros */
1200 #define MLX5_CAP_GEN(mdev, cap) \
1201         MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
1202
1203 #define MLX5_CAP_GEN_MAX(mdev, cap) \
1204         MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
1205
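/*
 * Illustrative sketch (not part of this header): the plain accessors
 * read the currently enabled capabilities (hca_caps_cur) while the
 * *_MAX variants read the device's maximum supported capabilities
 * (hca_caps_max).  The capability field name below comes from
 * mlx5_ifc.h; the device pointer is hypothetical.
 *
 *	unsigned max_cqs = 1U << MLX5_CAP_GEN(mdev, log_max_cq);
 */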
1206 #define MLX5_CAP_ETH(mdev, cap) \
1207         MLX5_GET(per_protocol_networking_offload_caps,\
1208                  mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1209
1210 #define MLX5_CAP_ETH_MAX(mdev, cap) \
1211         MLX5_GET(per_protocol_networking_offload_caps,\
1212                  mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1213
1214 #define MLX5_CAP_ROCE(mdev, cap) \
1215         MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
1216
1217 #define MLX5_CAP_ROCE_MAX(mdev, cap) \
1218         MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
1219
1220 #define MLX5_CAP_ATOMIC(mdev, cap) \
1221         MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
1222
1223 #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
1224         MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
1225
1226 #define MLX5_CAP_FLOWTABLE(mdev, cap) \
1227         MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
1228
1229 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1230         MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
1231
1232 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1233         MLX5_GET(flow_table_eswitch_cap, \
1234                  mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1235
1236 #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
1237         MLX5_GET(flow_table_eswitch_cap, \
1238                  mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1239
1240 #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
1241         MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
1242
1243 #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
1244         MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
1245
1246 #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
1247         MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
1248
1249 #define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
1250         MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
1251
1252 #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
1253         MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
1254
1255 #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
1256         MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
1257
1258 #define MLX5_CAP_ESW(mdev, cap) \
1259         MLX5_GET(e_switch_cap, \
1260                  mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
1261
1262 #define MLX5_CAP_ESW_MAX(mdev, cap) \
1263         MLX5_GET(e_switch_cap, \
1264                  mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
1265
1266 #define MLX5_CAP_ODP(mdev, cap)\
1267         MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
1268
1269 #define MLX5_CAP_ODP_MAX(mdev, cap)\
1270         MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap)
1271
1272 #define MLX5_CAP_SNAPSHOT(mdev, cap) \
1273         MLX5_GET(snapshot_cap, \
1274                  mdev->hca_caps_cur[MLX5_CAP_SNAPSHOT], cap)
1275
1276 #define MLX5_CAP_SNAPSHOT_MAX(mdev, cap) \
1277         MLX5_GET(snapshot_cap, \
1278                  mdev->hca_caps_max[MLX5_CAP_SNAPSHOT], cap)
1279
1280 #define MLX5_CAP_EOIB_OFFLOADS(mdev, cap) \
1281         MLX5_GET(per_protocol_networking_offload_caps,\
1282                  mdev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], cap)
1283
1284 #define MLX5_CAP_EOIB_OFFLOADS_MAX(mdev, cap) \
1285         MLX5_GET(per_protocol_networking_offload_caps,\
1286                  mdev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], cap)
1287
1288 #define MLX5_CAP_DEBUG(mdev, cap) \
1289         MLX5_GET(debug_cap, \
1290                  mdev->hca_caps_cur[MLX5_CAP_DEBUG], cap)
1291
1292 #define MLX5_CAP_DEBUG_MAX(mdev, cap) \
1293         MLX5_GET(debug_cap, \
1294                  mdev->hca_caps_max[MLX5_CAP_DEBUG], cap)
1295
1296 #define MLX5_CAP_QOS(mdev, cap) \
1297         MLX5_GET(qos_cap,\
1298                  mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
1299
1300 #define MLX5_CAP_QOS_MAX(mdev, cap) \
1301         MLX5_GET(qos_cap,\
1302                  mdev->hca_caps_max[MLX5_CAP_QOS], cap)
1303
1304 enum {
1305         MLX5_CMD_STAT_OK                        = 0x0,
1306         MLX5_CMD_STAT_INT_ERR                   = 0x1,
1307         MLX5_CMD_STAT_BAD_OP_ERR                = 0x2,
1308         MLX5_CMD_STAT_BAD_PARAM_ERR             = 0x3,
1309         MLX5_CMD_STAT_BAD_SYS_STATE_ERR         = 0x4,
1310         MLX5_CMD_STAT_BAD_RES_ERR               = 0x5,
1311         MLX5_CMD_STAT_RES_BUSY                  = 0x6,
1312         MLX5_CMD_STAT_LIM_ERR                   = 0x8,
1313         MLX5_CMD_STAT_BAD_RES_STATE_ERR         = 0x9,
1314         MLX5_CMD_STAT_IX_ERR                    = 0xa,
1315         MLX5_CMD_STAT_NO_RES_ERR                = 0xf,
1316         MLX5_CMD_STAT_BAD_INP_LEN_ERR           = 0x50,
1317         MLX5_CMD_STAT_BAD_OUTP_LEN_ERR          = 0x51,
1318         MLX5_CMD_STAT_BAD_QP_STATE_ERR          = 0x10,
1319         MLX5_CMD_STAT_BAD_PKT_ERR               = 0x30,
1320         MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR    = 0x40,
1321 };
1322
1323 enum {
1324         MLX5_IEEE_802_3_COUNTERS_GROUP        = 0x0,
1325         MLX5_RFC_2863_COUNTERS_GROUP          = 0x1,
1326         MLX5_RFC_2819_COUNTERS_GROUP          = 0x2,
1327         MLX5_RFC_3635_COUNTERS_GROUP          = 0x3,
1328         MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1329         MLX5_ETHERNET_DISCARD_COUNTERS_GROUP  = 0x6,
1330         MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
1331         MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
1332         MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
1333         MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
1334         MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
1335 };
1336
1337 enum {
1338         MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
1339         MLX5_PCIE_LANE_COUNTERS_GROUP         = 0x1,
1340         MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
1341 };
1342
1343 enum {
1344         MLX5_NUM_UUARS_PER_PAGE = MLX5_NON_FP_BF_REGS_PER_PAGE,
1345         MLX5_DEF_TOT_UUARS = 8 * MLX5_NUM_UUARS_PER_PAGE,
1346 };
1347
1348 enum {
1349         NUM_DRIVER_UARS = 4,
1350         NUM_LOW_LAT_UUARS = 4,
1351 };
1352
1353 enum {
1354         MLX5_CAP_PORT_TYPE_IB  = 0x0,
1355         MLX5_CAP_PORT_TYPE_ETH = 0x1,
1356 };
1357
1358 enum {
1359         MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_L2           = 0x0,
1360         MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_VPORT_CONFIG = 0x1,
1361         MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_NOT_REQUIRED = 0x2
1362 };
1363
1364 enum {
1365         MLX5_QUERY_VPORT_STATE_OUT_STATE_FOLLOW = 0x2,
1366 };
1367
1368 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1369 {
1370         if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
1371                 return 0;
1372         return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
1373 }
1374
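/*
 * Worked example: the device reports the P_Key table size as a shift
 * applied to MLX5_MIN_PKEY_TABLE_SIZE, so a reported value of 0 maps
 * to 128 entries and the maximum of MLX5_MAX_LOG_PKEY_TABLE (5) maps
 * to 128 << 5 = 4096 entries; anything larger makes the function
 * return 0.
 */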
1375 struct mlx5_ifc_mcia_reg_bits {
1376         u8         l[0x1];
1377         u8         reserved_0[0x7];
1378         u8         module[0x8];
1379         u8         reserved_1[0x8];
1380         u8         status[0x8];
1381
1382         u8         i2c_device_address[0x8];
1383         u8         page_number[0x8];
1384         u8         device_address[0x10];
1385
1386         u8         reserved_2[0x10];
1387         u8         size[0x10];
1388
1389         u8         reserved_3[0x20];
1390
1391         u8         dword_0[0x20];
1392         u8         dword_1[0x20];
1393         u8         dword_2[0x20];
1394         u8         dword_3[0x20];
1395         u8         dword_4[0x20];
1396         u8         dword_5[0x20];
1397         u8         dword_6[0x20];
1398         u8         dword_7[0x20];
1399         u8         dword_8[0x20];
1400         u8         dword_9[0x20];
1401         u8         dword_10[0x20];
1402         u8         dword_11[0x20];
1403 };
1404
1405 #define MLX5_CMD_OP_QUERY_EEPROM 0x93c
1406
1407 struct mlx5_mini_cqe8 {
1408         union {
1409                 __be32 rx_hash_result;
1410                 __be16 checksum;
1411                 __be16 rsvd;
1412                 struct {
1413                         __be16 wqe_counter;
1414                         u8  s_wqe_opcode;
1415                         u8  reserved;
1416                 } s_wqe_info;
1417         };
1418         __be32 byte_cnt;
1419 };
1420
1421 enum {
1422         MLX5_NO_INLINE_DATA,
1423         MLX5_INLINE_DATA32_SEG,
1424         MLX5_INLINE_DATA64_SEG,
1425         MLX5_COMPRESSED,
1426 };
1427
1428 enum mlx5_exp_cqe_zip_recv_type {
1429         MLX5_CQE_FORMAT_HASH,
1430         MLX5_CQE_FORMAT_CSUM,
1431 };
1432
1433 #define MLX5E_CQE_FORMAT_MASK 0xc
1434 static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe)
1435 {
1436         return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2;
1437 }
1438
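/*
 * Illustrative note: mlx5_get_cqe_format() extracts bits [3:2] of
 * op_own, yielding one of the MLX5_NO_INLINE_DATA ... MLX5_COMPRESSED
 * values above; MLX5_COMPRESSED indicates that the completion carries
 * compressed mini CQEs (struct mlx5_mini_cqe8).
 */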
1439 enum {
1440         MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
1441 };
1442
1443 /* 8 regular priorities + 1 for multicast */
1444 #define MLX5_NUM_BYPASS_FTS     9
1445
1446 #endif /* MLX5_DEVICE_H */