/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__

/************************************************************************/
/* Add include to common rdma target for both eCore and protocol rdma driver */
/************************************************************************/

#define RDMA_RESERVED_LKEY (0) // Reserved lkey
#define RDMA_RING_PAGE_SIZE (0x1000) // 4KB pages

#define RDMA_MAX_SGE_PER_SQ_WQE (4) // max number of SGEs in a single request
#define RDMA_MAX_SGE_PER_RQ_WQE (4) // max number of SGEs in a single request

#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) // max size of data in a single request

#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)

#define RDMA_MAX_CQS (64 * 1024)
#define RDMA_MAX_TIDS (128 * 1024 - 1)
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_MAX_XRC_SRQS (1024)
#define RDMA_MAX_SRQS (32 * 1024)

#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB

#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)

struct rdma_srq_id
{
	__le16 srq_idx /* SRQ index */;
	__le16 opaque_fid;
};

struct rdma_srq_producers
{
	__le32 sge_prod /* Current produced sge in SRQ */;
	__le32 wqe_prod /* Current produced WQE to SRQ */;
};
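
/*
 * Illustrative sketch (not part of the FW interface): publishing new SRQ
 * producer values after posting receive WQEs. This assumes the structure
 * lives in DMA-coherent memory polled by the device, and that cpu_to_le32()
 * is available in the build environment; any required write barrier is the
 * caller's responsibility.
 */
static inline void rdma_example_srq_publish(struct rdma_srq_producers *prod,
					    u32 wqe_prod, u32 sge_prod)
{
	/* the SGE producer should be visible before the WQE producer that covers it */
	prod->sge_prod = cpu_to_le32(sge_prod);
	prod->wqe_prod = cpu_to_le32(wqe_prod);
}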

/*
 * rdma completion notification queue element
 */
struct rdma_cnqe
{
	struct regpair cq_handle;
};

struct rdma_cqe_responder
{
	struct regpair srq_wr_id;
	struct regpair qp_handle;
	__le32 imm_data_or_inv_r_Key /* immediate data in case imm_flg is set, or invalidated r_key in case inv_flg is set */;
	__le32 length;
	__le32 imm_data_hi /* High bytes of immediate data, in case imm_flg is set. iWARP only */;
	__le16 rq_cons /* Valid only when status is WORK_REQUEST_FLUSHED_ERR. Indicates an aggregative flush on all posted RQ WQEs until the reported rq_cons. */;
	u8 flags;
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_RESPONDER_TYPE_MASK 0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_RESPONDER_TYPE_SHIFT 1
#define RDMA_CQE_RESPONDER_INV_FLG_MASK 0x1 /* r_key invalidated indicator */
#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT 3
#define RDMA_CQE_RESPONDER_IMM_FLG_MASK 0x1 /* immediate data indicator */
#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT 4
#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK 0x1 /* 1=this CQE relates to an RDMA Write. 0=Send. */
#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT 5
#define RDMA_CQE_RESPONDER_RESERVED2_MASK 0x3
#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT 6
	u8 status /* (use enum rdma_cqe_responder_status_enum) */;
};
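
/*
 * Illustrative sketch: decoding the responder CQE flags with the GET_FIELD()
 * helper (assumed to come from common_hsi.h in this code base). Returns the
 * immediate data if imm_flg is set, or 0 otherwise.
 */
static inline u32 rdma_example_resp_imm_data(const struct rdma_cqe_responder *cqe)
{
	if (GET_FIELD(cqe->flags, RDMA_CQE_RESPONDER_IMM_FLG))
		return le32_to_cpu(cqe->imm_data_or_inv_r_Key);
	return 0;
}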

struct rdma_cqe_requester
{
	__le16 sq_cons;
	__le16 reserved0;
	__le32 reserved1;
	struct regpair qp_handle;
	struct regpair reserved2;
	__le32 reserved3;
	__le16 reserved4;
	u8 flags;
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK 0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_REQUESTER_TYPE_MASK 0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_REQUESTER_TYPE_SHIFT 1
#define RDMA_CQE_REQUESTER_RESERVED5_MASK 0x1F
#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT 3
	u8 status /* (use enum rdma_cqe_requester_status_enum) */;
};

struct rdma_cqe_common
{
	struct regpair reserved0;
	struct regpair qp_handle;
	__le16 reserved1[3];
	u8 flags;
#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK 0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_COMMON_TYPE_MASK 0x3 /* (use enum rdma_cqe_type) */
#define RDMA_CQE_COMMON_TYPE_SHIFT 1
#define RDMA_CQE_COMMON_RESERVED2_MASK 0x1F
#define RDMA_CQE_COMMON_RESERVED2_SHIFT 3
	u8 status;
};

/*
 * rdma completion queue element
 */
union rdma_cqe
{
	struct rdma_cqe_responder resp;
	struct rdma_cqe_requester req;
	struct rdma_cqe_common cmn;
};
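
/*
 * Illustrative sketch: sizing a CQ ring in RDMA_RING_PAGE_SIZE pages, a
 * minimal example assuming one 32-byte CQE per ring slot.
 */
static inline u32 rdma_example_cq_ring_pages(u32 num_cqes)
{
	u32 bytes = num_cqes * (u32)sizeof(union rdma_cqe);

	return (bytes + RDMA_RING_PAGE_SIZE - 1) / RDMA_RING_PAGE_SIZE;
}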

/*
 * CQE requester status enumeration
 */
enum rdma_cqe_requester_status_enum
{
	RDMA_CQE_REQ_STS_OK,
	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
	RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
	RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};

/*
 * CQE responder status enumeration
 */
enum rdma_cqe_responder_status_enum
{
	RDMA_CQE_RESP_STS_OK,
	RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
	RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
};

/*
 * CQE type enumeration
 */
enum rdma_cqe_type
{
	RDMA_CQE_TYPE_REQUESTER,
	RDMA_CQE_TYPE_RESPONDER_RQ,
	RDMA_CQE_TYPE_RESPONDER_SRQ,
	RDMA_CQE_TYPE_INVALID,
	MAX_RDMA_CQE_TYPE
};
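
/*
 * Illustrative polling sketch: a CQE is valid only when its toggle bit
 * matches the expected phase, which flips on each wrap of the ring. The
 * ring/consumer bookkeeping below is hypothetical driver state, not part of
 * this interface; GET_FIELD() is assumed from common_hsi.h. The TYPE field
 * of the returned CQE (enum rdma_cqe_type) selects the resp/req/cmn view.
 */
static inline union rdma_cqe *rdma_example_cq_poll(union rdma_cqe *ring,
						   u32 ring_size, u32 *cons,
						   u8 *exp_toggle)
{
	union rdma_cqe *cqe = &ring[*cons % ring_size];

	if (GET_FIELD(cqe->cmn.flags, RDMA_CQE_COMMON_TOGGLE_BIT) != *exp_toggle)
		return NULL; /* no new completion */

	if (++(*cons) % ring_size == 0)
		*exp_toggle ^= 1; /* expected phase flips on ring wrap */

	return cqe;
}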

/*
 * DIF Block size options
 */
enum rdma_dif_block_size
{
	RDMA_DIF_BLOCK_512 = 0,
	RDMA_DIF_BLOCK_4096 = 1,
	MAX_RDMA_DIF_BLOCK_SIZE
};

/*
 * DIF CRC initial value
 */
enum rdma_dif_crc_seed
{
	RDMA_DIF_CRC_SEED_0000 = 0,
	RDMA_DIF_CRC_SEED_FFFF = 1,
	MAX_RDMA_DIF_CRC_SEED
};

/*
 * RDMA DIF Error Result Structure
 */
struct rdma_dif_error_result
{
	__le32 error_intervals /* Total number of error intervals in the IO. */;
	__le32 dif_error_1st_interval /* Number of the first interval that contained an error. Set to 0xFFFFFFFF if the error occurred in the Runt Block. */;
	u8 flags;
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK 0x1 /* CRC error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT 0
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK 0x1 /* App Tag error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK 0x1 /* Ref Tag error occurred. */
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK 0xF
#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT 3
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK 0x1 /* Used to indicate the structure is valid. Toggles each time an invalidate region is performed. */
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT 7
	u8 reserved1[55] /* Pad to 64 bytes to ensure efficient word-line writing. */;
};
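
/*
 * Illustrative sketch: decoding a DIF error result with GET_FIELD() (assumed
 * from common_hsi.h). Validity tracking via the toggle bit is left to the
 * caller, which must remember the last toggle value it consumed.
 */
static inline int rdma_example_dif_crc_error(const struct rdma_dif_error_result *res)
{
	return GET_FIELD(res->flags, RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC) != 0;
}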

/*
 * DIF IO direction
 */
enum rdma_dif_io_direction_flg
{
	RDMA_DIF_DIR_RX = 0,
	RDMA_DIF_DIR_TX = 1,
	MAX_RDMA_DIF_IO_DIRECTION_FLG
};

/*
 * RDMA DIF Runt Result Structure
 */
struct rdma_dif_runt_result
{
	__le16 guard_tag /* CRC result of received IO. */;
	__le16 reserved[3];
};

/*
 * memory window type enumeration
 */
enum rdma_mw_type
{
	RDMA_MW_TYPE_1,
	RDMA_MW_TYPE_2A,
	MAX_RDMA_MW_TYPE
};

struct rdma_rq_sge
{
	struct regpair addr;
	__le32 length;
	__le32 flags;
#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF /* key of memory relating to this RQ */
#define RDMA_RQ_SGE_L_KEY_SHIFT 0
#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7 /* On the first SGE: number of SGEs in this RQ WQE. On other SGEs: should be set to 0 */
#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26
#define RDMA_RQ_SGE_RESERVED0_MASK 0x7
#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
};
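
/*
 * Illustrative sketch: filling an RQ SGE. SET_FIELD() is assumed from
 * common_hsi.h; num_sges is meaningful only on the first SGE of the WQE, as
 * the NUM_SGES comment above requires, so callers pass 0 for the rest.
 */
static inline void rdma_example_fill_rq_sge(struct rdma_rq_sge *sge, u64 pa,
					    u32 len, u32 lkey, u8 num_sges)
{
	u32 flags = 0;

	sge->addr.hi = cpu_to_le32((u32)(pa >> 32));
	sge->addr.lo = cpu_to_le32((u32)pa);
	sge->length = cpu_to_le32(len);
	SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, lkey);
	SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, num_sges);
	sge->flags = cpu_to_le32(flags);
}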

struct rdma_sq_atomic_wqe
{
	__le32 reserved1;
	__le32 length /* Total data length (8 bytes for Atomic) */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK 0x1 /* Don't care for atomic WQE */
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK 0x1 /* Should be 0 for atomic WQE */
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK 0x1 /* Should be 0 for atomic WQE */
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK 0x3
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT 6
	u8 wqe_size /* Size of WQE in 16B chunks including SGE */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	struct regpair remote_va /* remote virtual address */;
	__le32 r_key /* Remote key */;
	__le32 reserved2;
	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
	struct regpair swap_data /* Swap or add data */;
};
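
/*
 * Illustrative sketch: preparing a compare-and-swap atomic WQE according to
 * the field comments above. SET_FIELD() is assumed from common_hsi.h, the
 * req type value comes from enum rdma_sq_req_type later in this file, and
 * the caller is assumed to append one SGE for the 8-byte response buffer.
 */
static inline void rdma_example_prep_cmp_swap(struct rdma_sq_atomic_wqe *wqe,
					      u64 remote_va, u32 r_key,
					      u64 cmp, u64 swap)
{
	memset(wqe, 0, sizeof(*wqe));
	wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
	wqe->length = cpu_to_le32(8); /* atomics always act on 8 bytes */
	SET_FIELD(wqe->flags, RDMA_SQ_ATOMIC_WQE_COMP_FLG, 1);
	wqe->remote_va.hi = cpu_to_le32((u32)(remote_va >> 32));
	wqe->remote_va.lo = cpu_to_le32((u32)remote_va);
	wqe->r_key = cpu_to_le32(r_key);
	wqe->cmp_data.hi = cpu_to_le32((u32)(cmp >> 32));
	wqe->cmp_data.lo = cpu_to_le32((u32)cmp);
	wqe->swap_data.hi = cpu_to_le32((u32)(swap >> 32));
	wqe->swap_data.lo = cpu_to_le32((u32)swap);
}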

/*
 * First element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_1st
{
	__le32 reserved1;
	__le32 length /* Total data length (8 bytes for Atomic) */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT 0
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK 0x1 /* Don't care for atomic WQE */
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT 3
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK 0x1 /* Should be 0 for atomic WQE */
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK 0x7
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT 5
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs. Set to number of SGEs + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

/*
 * Second element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_2nd
{
	struct regpair remote_va /* remote virtual address */;
	__le32 r_key /* Remote key */;
	__le32 reserved2;
};

/*
 * Third element (16 bytes) of atomic wqe
 */
struct rdma_sq_atomic_wqe_3rd
{
	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
	struct regpair swap_data /* Swap or add data */;
};

struct rdma_sq_bind_wqe
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_SE_FLG_MASK 0x1 /* Don't care for bind WQE */
#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1 /* Should be 0 for bind WQE */
#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7
#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1 /* zero-based indication */
#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0
#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1 /* (use enum rdma_mw_type) */
#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1
#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F
#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2
	u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK 0x1
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT 1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK 0x1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT 3
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK 0x1
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT 4
#define RDMA_SQ_BIND_WQE_RESERVED2_MASK 0x7
#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT 5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MW length */;
	__le32 length_lo /* lower 32 bits of the registered MW length */;
	__le32 parent_l_key /* l_key of the parent MR */;
	__le32 reserved4;
};
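
/*
 * Illustrative sketch: composing the access_ctrl byte of a bind WQE from the
 * flags above. SET_FIELD() is assumed from common_hsi.h; this sketch always
 * grants local read, which is a policy choice of the example, not a rule of
 * the interface.
 */
static inline u8 rdma_example_bind_access(int remote_read, int remote_write,
					  int atomic, int local_write)
{
	u8 access_ctrl = 0;

	SET_FIELD(access_ctrl, RDMA_SQ_BIND_WQE_REMOTE_READ, remote_read ? 1 : 0);
	SET_FIELD(access_ctrl, RDMA_SQ_BIND_WQE_REMOTE_WRITE, remote_write ? 1 : 0);
	SET_FIELD(access_ctrl, RDMA_SQ_BIND_WQE_ENABLE_ATOMIC, atomic ? 1 : 0);
	SET_FIELD(access_ctrl, RDMA_SQ_BIND_WQE_LOCAL_READ, 1);
	SET_FIELD(access_ctrl, RDMA_SQ_BIND_WQE_LOCAL_WRITE, local_write ? 1 : 0);
	return access_ctrl;
}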

/*
 * First element (16 bytes) of bind wqe
 */
struct rdma_sq_bind_wqe_1st
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT 0
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK 0x1 /* Don't care for bind WQE */
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT 3
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK 0x1 /* Should be 0 for bind WQE */
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK 0x7
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT 5
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

/*
 * Second element (16 bytes) of bind wqe
 */
struct rdma_sq_bind_wqe_2nd
{
	u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1 /* zero-based indication */
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1 /* (use enum rdma_mw_type) */
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2
	u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT 1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT 3
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT 4
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK 0x7
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT 5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MW length */;
	__le32 length_lo /* lower 32 bits of the registered MW length */;
	__le32 parent_l_key /* l_key of the parent MR */;
	__le32 reserved4;
};

/*
 * Structure with only the SQ WQE common fields. Size is of one SQ element (16B)
 */
struct rdma_sq_common_wqe
{
	__le32 reserved1[3];
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK 0x1 /* If set, signals the responder to generate a solicited event on this WQE (only relevant in SENDs and RDMA write with Imm) */
#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK 0x1 /* If set, indicates that inline data follows this WQE instead of SGEs (only relevant in SENDs and RDMA writes) */
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK 0x7
#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT 5
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the number of whole 16B chunks that contain the inline data + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
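
/*
 * Illustrative sketch of the wqe_size rule described above: one 16B chunk
 * for the WQE header element, plus one chunk per SGE, or enough whole 16B
 * chunks to hold the inline payload.
 */
static inline u8 rdma_example_wqe_size(u8 num_sges, u32 inline_len, int is_inline)
{
	if (is_inline)
		return 1 + (u8)((inline_len + 15) / 16);
	return 1 + num_sges;
}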

struct rdma_sq_fmr_wqe
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_FMR_WQE_SE_FLG_MASK 0x1 /* Don't care for FMR WQE */
#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK 0x1 /* Should be 0 for FMR WQE */
#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK 0x1 /* If set, indicates that the host memory of this WQE is DIF protected. */
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_FMR_WQE_RESERVED0_MASK 0x3
#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT 6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK 0x1F /* 0 is 4k, 1 is 8k... */
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK 0x1 /* zero-based indication */
#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT 5
#define RDMA_SQ_FMR_WQE_BIND_EN_MASK 0x1 /* indication of whether bind is enabled for this MR */
#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT 6
#define RDMA_SQ_FMR_WQE_RESERVED1_MASK 0x1
#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT 7
	u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK 0x1
#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT 0
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK 0x1
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT 1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK 0x1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK 0x1
#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT 3
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK 0x1
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT 4
#define RDMA_SQ_FMR_WQE_RESERVED2_MASK 0x7
#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT 5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MR length */;
	__le32 length_lo /* lower 32 bits of the registered MR length. In case of DIF the length is specified including the DIF guards. */;
	struct regpair pbl_addr /* Address of PBL */;
	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
	__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1 /* In TX IO, indicates the runt_value field is valid. In RX IO, indicates the calculated runt value is to be placed in the host buffer. */
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1 /* In TX IO, indicates the CRC of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1 /* In TX IO, indicates the Ref tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1 /* In TX IO, indicates the App tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1 /* In RX IO, the Ref Tag will remain at the constant value of dif_base_ref_tag */
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
	__le32 reserved5;
};
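
/*
 * Illustrative sketch: composing the dif_flags word of an FMR WQE for a TX
 * IO over 512B blocks with CRC guard validation. SET_FIELD() is assumed
 * from common_hsi.h; the enum values referenced are defined earlier in this
 * file.
 */
static inline __le16 rdma_example_fmr_dif_flags(void)
{
	u16 dif_flags = 0;

	SET_FIELD(dif_flags, RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG, RDMA_DIF_DIR_TX);
	SET_FIELD(dif_flags, RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE, RDMA_DIF_BLOCK_512);
	SET_FIELD(dif_flags, RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD, 1);
	SET_FIELD(dif_flags, RDMA_SQ_FMR_WQE_DIF_CRC_SEED, RDMA_DIF_CRC_SEED_0000);
	return cpu_to_le16(dif_flags);
}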

/*
 * First element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_1st
{
	struct regpair addr;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT 0
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK 0x1 /* Don't care for FMR WQE */
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT 3
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK 0x1 /* Should be 0 for FMR WQE */
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1 /* If set, indicates that the host memory of this WQE is DIF protected. */
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK 0x3
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT 6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

/*
 * Second element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_2nd
{
	u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK 0x1F /* 0 is 4k, 1 is 8k... */
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK 0x1 /* zero-based indication */
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT 5
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK 0x1 /* indication of whether bind is enabled for this MR */
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT 6
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT 7
	u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT 0
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT 1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT 3
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK 0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT 4
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK 0x7
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT 5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MR length */;
	__le32 length_lo /* lower 32 bits of the registered MR length. */;
	struct regpair pbl_addr /* Address of PBL */;
};

/*
 * Third element (16 bytes) of fmr wqe
 */
struct rdma_sq_fmr_wqe_3rd
{
	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
	__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1 /* In TX IO, indicates the runt_value field is valid. In RX IO, indicates the calculated runt value is to be placed in the host buffer. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1 /* In TX IO, indicates the CRC of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1 /* In TX IO, indicates the Ref tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1 /* In TX IO, indicates the App tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
	__le32 reserved5;
};

struct rdma_sq_local_inv_wqe
{
	struct regpair reserved;
	__le32 inv_l_key /* The local key to invalidate */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK 0x1 /* Don't care for local invalidate WQE */
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK 0x1 /* Should be 0 for local invalidate WQE */
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK 0x1 /* If set, indicates that the host memory of this WQE is DIF protected. */
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK 0x3
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT 6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

struct rdma_sq_rdma_wqe
{
	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
	__le32 length /* Total data length. If DIF on host is enabled, length does NOT include DIF guards. */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1 /* If set, signals the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1 /* If set, indicates that inline data follows this WQE instead of SGEs. Applicable for RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1 /* If set, indicates that the host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1 /* If set, indicates a read-with-invalidate WQE. iWARP only */
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the number of whole 16B chunks that contain the inline data + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	struct regpair remote_va /* Remote virtual address */;
	__le32 r_key /* Remote key */;
	u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1 /* if dif_on_host_flg set: WQE executes first RDMA on related IO. */
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1 /* if dif_on_host_flg set: WQE executes last RDMA on related IO. */
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2
#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F
#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3
	u8 reserved2[3];
};
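
/*
 * Illustrative sketch: preparing a signalled RDMA write request. The caller
 * is assumed to append num_sges SGE elements after this header element;
 * SET_FIELD() is assumed from common_hsi.h and the req type value comes
 * from enum rdma_sq_req_type below.
 */
static inline void rdma_example_prep_rdma_write(struct rdma_sq_rdma_wqe *wqe,
						u64 remote_va, u32 r_key,
						u32 total_len, u8 num_sges)
{
	memset(wqe, 0, sizeof(*wqe));
	wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
	wqe->length = cpu_to_le32(total_len);
	SET_FIELD(wqe->flags, RDMA_SQ_RDMA_WQE_COMP_FLG, 1);
	wqe->wqe_size = 1 + num_sges; /* header chunk + one chunk per SGE */
	wqe->remote_va.hi = cpu_to_le32((u32)(remote_va >> 32));
	wqe->remote_va.lo = cpu_to_le32((u32)remote_va);
	wqe->r_key = cpu_to_le32(r_key);
}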

/*
 * First element (16 bytes) of rdma wqe
 */
struct rdma_sq_rdma_wqe_1st
{
	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT 0
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK 0x1 /* If set, signals the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT 3
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK 0x1 /* If set, indicates that inline data follows this WQE instead of SGEs. Applicable for RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1 /* If set, indicates that the host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK 0x1 /* If set, indicates a read-with-invalidate WQE. iWARP only */
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT 6
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x1
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 7
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the number of whole 16B chunks that contain the inline data + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

/*
 * Second element (16 bytes) of rdma wqe
 */
struct rdma_sq_rdma_wqe_2nd
{
	struct regpair remote_va /* Remote virtual address */;
	__le32 r_key /* Remote key */;
	u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK 0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT 0
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK 0x1 /* if dif_on_host_flg set: WQE executes first DIF on related MR. */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK 0x1 /* if dif_on_host_flg set: WQE executes last DIF on related MR. */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT 2
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK 0x1F
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT 3
	u8 reserved2[3];
};

/*
 * SQ WQE req type enumeration
 */
enum rdma_sq_req_type
{
	RDMA_SQ_REQ_TYPE_SEND,
	RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
	RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
	RDMA_SQ_REQ_TYPE_RDMA_WR,
	RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
	RDMA_SQ_REQ_TYPE_RDMA_RD,
	RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
	RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
	RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
	RDMA_SQ_REQ_TYPE_FAST_MR,
	RDMA_SQ_REQ_TYPE_BIND,
	RDMA_SQ_REQ_TYPE_INVALID,
	MAX_RDMA_SQ_REQ_TYPE
};

struct rdma_sq_send_wqe
{
	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT 0
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_SEND_WQE_SE_FLG_MASK 0x1 /* If set, signals the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK 0x1 /* If set, indicates that inline data follows this WQE instead of SGEs */
#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK 0x1 /* Should be 0 for send WQE */
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_SEND_WQE_RESERVED0_MASK 0x3
#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT 6
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the number of whole 16B chunks that contain the inline data + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
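
/*
 * Illustrative sketch: a signalled, solicited SEND_WITH_IMM header element
 * with SGE payload. SET_FIELD() is assumed from common_hsi.h; req type
 * values come from the enum above.
 */
static inline void rdma_example_prep_send_imm(struct rdma_sq_send_wqe *wqe,
					      u32 imm_data, u32 len, u8 num_sges)
{
	memset(wqe, 0, sizeof(*wqe));
	wqe->inv_key_or_imm_data = cpu_to_le32(imm_data);
	wqe->length = cpu_to_le32(len);
	wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
	SET_FIELD(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, 1);
	SET_FIELD(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG, 1);
	wqe->wqe_size = 1 + num_sges;
}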

struct rdma_sq_send_wqe_1st
{
	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK 0x1 /* If set, a completion will be generated when the WQE is completed */
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT 0
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK 0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE starts */
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK 0x1 /* If set, all pending operations will be completed before processing of this WQE starts */
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK 0x1 /* If set, signals the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT 3
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK 0x1 /* If set, indicates that inline data follows this WQE instead of SGEs */
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK 0x7
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT 5
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the number of whole 16B chunks that contain the inline data + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};

struct rdma_sq_send_wqe_2st
{
	__le32 reserved1[4];
};

struct rdma_sq_sge
{
	struct regpair addr;
	__le32 length /* Total length of the send. If DIF on host is enabled, SGE length includes the DIF guards. */;
	__le32 l_key;
};

struct rdma_srq_wqe_header
{
	struct regpair wr_id;
	u8 num_sges /* number of SGEs in WQE */;
	u8 reserved2[7];
};

struct rdma_srq_sge
{
	struct regpair addr;
	__le32 length;
	__le32 l_key;
};

union rdma_srq_wqe
{
	struct rdma_srq_wqe_header header;
	struct rdma_srq_sge sge;
};
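
/*
 * Illustrative sketch: an SRQ receive WQE is a header element followed by
 * num_sges SGE elements in the SRQ ring. The consecutive-slot placement
 * below is a hypothetical driver policy, not something this header
 * mandates; cpu_to_le32() is assumed from the build environment.
 */
static inline void rdma_example_srq_wqe(union rdma_srq_wqe *slot, u64 wr_id,
					u64 buf_pa, u32 buf_len, u32 lkey)
{
	slot[0].header.wr_id.hi = cpu_to_le32((u32)(wr_id >> 32));
	slot[0].header.wr_id.lo = cpu_to_le32((u32)wr_id);
	slot[0].header.num_sges = 1;
	slot[1].sge.addr.hi = cpu_to_le32((u32)(buf_pa >> 32));
	slot[1].sge.addr.lo = cpu_to_le32((u32)buf_pa);
	slot[1].sge.length = cpu_to_le32(buf_len);
	slot[1].sge.l_key = cpu_to_le32(lkey);
}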

/*
 * Rdma doorbell data for flags update
 */
struct rdma_pwm_flags_data
{
	__le16 icid /* internal CID */;
	u8 agg_flags /* aggregative flags */;
	u8 reserved;
};

/*
 * Rdma doorbell data for SQ and RQ
 */
struct rdma_pwm_val16_data
{
	__le16 icid /* internal CID */;
	__le16 value /* aggregated value to update */;
};

union rdma_pwm_val16_data_union
{
	struct rdma_pwm_val16_data as_struct /* Parameters field */;
	__le32 as_dword;
};

/*
 * Rdma doorbell data for CQ
 */
struct rdma_pwm_val32_data
{
	__le16 icid /* internal CID */;
	u8 agg_flags /* one bit for each DQ counter flag in the CM context that the DQ can increment */;
	u8 params;
#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK 0x1 /* Connection type is iWARP */
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK 0x1 /* Flag indicating that the 16b variable should be updated. Should be set when conn_type_is_iwarp is used */
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT 4
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x7
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 5
	__le32 value /* aggregated value to update */;
};

union rdma_pwm_val32_data_union
{
	struct rdma_pwm_val32_data as_struct /* Parameters field */;
	struct regpair as_repair;
};
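
/*
 * Illustrative sketch: composing the CQ doorbell payload via the union
 * above so it can be written as a single regpair. SET_FIELD() is assumed
 * from common_hsi.h, and DB_AGG_CMD_SET is an assumption about enum
 * db_agg_cmd_sel from the common headers.
 */
static inline void rdma_example_cq_db(union rdma_pwm_val32_data_union *db,
				      u16 icid, u32 cq_cons)
{
	memset(db, 0, sizeof(*db));
	db->as_struct.icid = cpu_to_le16(icid);
	SET_FIELD(db->as_struct.params, RDMA_PWM_VAL32_DATA_AGG_CMD, DB_AGG_CMD_SET);
	db->as_struct.value = cpu_to_le32(cq_cons);
}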

#endif /* __RDMA_COMMON__ */