/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX4_QP_H
#define MLX4_QP_H

#include <linux/types.h>

#include <linux/mlx4/device.h>

#define MLX4_INVALID_LKEY	0x100
enum ib_m_qp_attr_mask {
	IB_M_EXT_CLASS_1	= 1 << 28,
	IB_M_EXT_CLASS_2	= 1 << 29,
	IB_M_EXT_CLASS_3	= 1 << 30,

	IB_M_QP_MOD_VEND_MASK	= (IB_M_EXT_CLASS_1 | IB_M_EXT_CLASS_2 |
				   IB_M_EXT_CLASS_3)
};
enum mlx4_qp_optpar {
	MLX4_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
	MLX4_QP_OPTPAR_RRE			= 1 << 1,
	MLX4_QP_OPTPAR_RAE			= 1 << 2,
	MLX4_QP_OPTPAR_RWE			= 1 << 3,
	MLX4_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
	MLX4_QP_OPTPAR_Q_KEY			= 1 << 5,
	MLX4_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
	MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
	MLX4_QP_OPTPAR_SRA_MAX			= 1 << 8,
	MLX4_QP_OPTPAR_RRA_MAX			= 1 << 9,
	MLX4_QP_OPTPAR_PM_STATE			= 1 << 10,
	MLX4_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX4_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX4_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
	MLX4_QP_OPTPAR_SCHED_QUEUE		= 1 << 16,
	MLX4_QP_OPTPAR_COUNTER_INDEX		= 1 << 20
};
enum mlx4_qp_state {
	MLX4_QP_STATE_RST		= 0,
	MLX4_QP_STATE_INIT		= 1,
	MLX4_QP_STATE_RTR		= 2,
	MLX4_QP_STATE_RTS		= 3,
	MLX4_QP_STATE_SQER		= 4,
	MLX4_QP_STATE_SQD		= 5,
	MLX4_QP_STATE_ERR		= 6,
	MLX4_QP_STATE_SQ_DRAINING	= 7,
};
enum {
	/* path migration (APM) states */
	MLX4_QP_PM_MIGRATED	= 0x3,
	MLX4_QP_PM_ARMED	= 0x0,
	MLX4_QP_PM_REARM	= 0x1
};
enum {
	/* params1 */
	MLX4_QP_BIT_SRE			= 1 << 15,
	MLX4_QP_BIT_SWE			= 1 << 14,
	MLX4_QP_BIT_SAE			= 1 << 13,
	/* params2 */
	MLX4_QP_BIT_RRE			= 1 << 15,
	MLX4_QP_BIT_RWE			= 1 << 14,
	MLX4_QP_BIT_RAE			= 1 << 13,
	MLX4_QP_BIT_RIC			= 1 << 4,
	MLX4_QP_BIT_COLL_SYNC_RQ	= 1 << 2,
	MLX4_QP_BIT_COLL_SYNC_SQ	= 1 << 1,
	MLX4_QP_BIT_COLL_MASTER		= 1 << 0
};
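
/*
 * Illustrative sketch (not part of this header): the bits above are OR-ed
 * into the params1/params2 words of the QP context.  Enabling remote
 * read/write access, for instance, would typically set the params2 bits and
 * flag the change with the matching MLX4_QP_OPTPAR_* values:
 *
 *	context->params2 |= cpu_to_be32(MLX4_QP_BIT_RWE | MLX4_QP_BIT_RRE);
 *	optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE;
 */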
enum {
	MLX4_RSS_HASH_XOR	= 0,
	MLX4_RSS_HASH_TOP	= 1,

	MLX4_RSS_UDP_IPV6	= 1 << 0,
	MLX4_RSS_UDP_IPV4	= 1 << 1,
	MLX4_RSS_TCP_IPV6	= 1 << 2,
	MLX4_RSS_IPV6		= 1 << 3,
	MLX4_RSS_TCP_IPV4	= 1 << 4,
	MLX4_RSS_IPV4		= 1 << 5,

	/* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
	MLX4_RSS_OFFSET_IN_QPC_PRI_PATH	= 0x24,
	/* offset of the RSS indirection QP flag within mlx4_qp_context.flags */
	MLX4_RSS_QPC_FLAG_OFFSET	= 13,
};
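
/*
 * Illustrative sketch: for an RSS indirection QP, struct mlx4_rss_context
 * below is overlaid on the pri_path area of the QP context, and the QP is
 * marked as an RSS QP through the flags word, along the lines of:
 *
 *	struct mlx4_rss_context *rss_ctx =
 *		(void *)context + offsetof(struct mlx4_qp_context, pri_path) +
 *		MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
 *	context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
 */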
struct mlx4_rss_context {

struct mlx4_qp_path {
	u8	disable_pkey_check;
	__be32	tclass_flowlabel;
enum { /* fl */
	MLX4_FL_ETH_HIDE_CQE_VLAN	= 1 << 2,
	MLX4_FL_ETH_SRC_CHECK_MC_LB	= 1 << 1,
	MLX4_FL_ETH_SRC_CHECK_UC_LB	= 1 << 0,
};
enum { /* vlan_control */
	MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER	= 1 << 7,
	MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED	= 1 << 6,
	MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED	= 1 << 2,
	MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED	= 1 << 1, /* 802.1p priority tag */
	MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED	= 1 << 0
};
enum { /* feup */
	MLX4_FEUP_FORCE_ETH_UP		= 1 << 6, /* force Eth UP */
	MLX4_FSM_FORCE_ETH_SRC_MAC	= 1 << 5, /* force Source MAC */
	MLX4_FVL_FORCE_ETH_VLAN		= 1 << 3  /* force Eth vlan */
};
enum { /* fvl_rx */
	MLX4_FVL_RX_FORCE_ETH_VLAN	= 1 << 0 /* enforce Eth rx vlan */
};
struct mlx4_qp_context {
	struct mlx4_qp_path	pri_path;
	struct mlx4_qp_path	alt_path;
	__be32			next_send_psn;
	__be32			last_acked_psn;
	__be32			rnr_nextrecvpsn;
	__be16			rq_wqe_counter;
	__be16			sq_wqe_counter;
	__be32			nummmcpeers_basemkey;
	__be32			mtt_base_addr_l;
struct mlx4_update_qp_context {
	__be64			primary_addr_path_mask;
	__be64			secondary_addr_path_mask;
	struct mlx4_qp_context	qp_context;
enum {
	MLX4_UPD_QP_MASK_PM_STATE	= 32,
	MLX4_UPD_QP_MASK_VSD		= 33,
};
enum {
	MLX4_UPD_QP_PATH_MASK_PKEY_INDEX		= 0 + 32,
	MLX4_UPD_QP_PATH_MASK_FSM			= 1 + 32,
	MLX4_UPD_QP_PATH_MASK_MAC_INDEX			= 2 + 32,
	MLX4_UPD_QP_PATH_MASK_FVL			= 3 + 32,
	MLX4_UPD_QP_PATH_MASK_CV			= 4 + 32,
	MLX4_UPD_QP_PATH_MASK_VLAN_INDEX		= 5 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN		= 6 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED	= 7 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P		= 8 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED	= 9 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED	= 10 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P		= 11 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED	= 12 + 32,
	MLX4_UPD_QP_PATH_MASK_FEUP			= 13 + 32,
	MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE		= 14 + 32,
	MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX		= 15 + 32,
	MLX4_UPD_QP_PATH_MASK_FVL_RX			= 16 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB	= 18 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB	= 19 + 32,
};
enum { /* param3 */
	MLX4_STRIP_VLAN = 1 << 30
};
/* The firmware version that adds support for the NEC (NoErrorCompletion) bit */
#define MLX4_FW_VER_WQE_CTRL_NEC	mlx4_fw_ver(2, 2, 232)
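
/*
 * Illustrative sketch ("use_nec" is a hypothetical local variable): use of
 * the NEC WQE flag is gated on the running firmware version, e.g.:
 *
 *	bool use_nec = dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC;
 */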
enum {
	MLX4_WQE_CTRL_NEC		= 1 << 29,
	MLX4_WQE_CTRL_FENCE		= 1 << 6,
	MLX4_WQE_CTRL_CQ_UPDATE		= 3 << 2,
	MLX4_WQE_CTRL_SOLICITED		= 1 << 1,
	MLX4_WQE_CTRL_IP_CSUM		= 1 << 4,
	MLX4_WQE_CTRL_TCP_UDP_CSUM	= 1 << 5,
	MLX4_WQE_CTRL_INS_VLAN		= 1 << 6,
	MLX4_WQE_CTRL_STRONG_ORDER	= 1 << 7,
	MLX4_WQE_CTRL_FORCE_LOOPBACK	= 1 << 0,
};
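
/*
 * Illustrative sketch: a sender requesting a completion and TCP/UDP checksum
 * offload would set, in the srcrb_flags word of struct mlx4_wqe_ctrl_seg
 * below:
 *
 *	ctrl->srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
 *					MLX4_WQE_CTRL_TCP_UDP_CSUM);
 */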
struct mlx4_wqe_ctrl_seg {
	/*
	 * High 24 bits are SRC remote buffer; low 8 bits are flags:
	 * [7]   SO (strong ordering)
	 * [5]   TCP/UDP checksum
	 * [4]   IP checksum
	 * [3:2] C (generate completion queue entry)
	 * [1]   SE (solicited event)
	 * [0]   FL (force loopback)
	 */
	__be16			srcrb_flags16[2];
	/*
	 * imm is immediate data for send/RDMA write w/ immediate;
	 * also invalidation key for send with invalidate; input
	 * modifier for WQEs on CCQs.
	 */
	__be32			imm;
};
enum {
	MLX4_WQE_MLX_VL15	= 1 << 17,
	MLX4_WQE_MLX_SLR	= 1 << 16
};
struct mlx4_wqe_mlx_seg {
	/*
	 * [15:12] static rate
	 * [0]     FL (force loopback)
	 */
struct mlx4_wqe_datagram_seg {

struct mlx4_wqe_lso_seg {
enum mlx4_wqe_bind_seg_flags2 {
	MLX4_WQE_BIND_TYPE_2	 = (1 << 31),
	MLX4_WQE_BIND_ZERO_BASED = (1 << 30),
};

struct mlx4_wqe_bind_seg {
enum {
	MLX4_WQE_FMR_PERM_LOCAL_READ		= 1 << 27,
	MLX4_WQE_FMR_PERM_LOCAL_WRITE		= 1 << 28,
	MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ	= 1 << 29,
	MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE	= 1 << 30,
	MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC	= 1 << 31
};
struct mlx4_wqe_fmr_seg {

struct mlx4_wqe_fmr_ext_seg {
	__be32	wire_ref_tag_base;
	__be32	mem_ref_tag_base;

struct mlx4_wqe_local_inval_seg {
struct mlx4_wqe_raddr_seg {

struct mlx4_wqe_atomic_seg {

struct mlx4_wqe_masked_atomic_seg {
	__be64	swap_add_mask;

struct mlx4_wqe_data_seg {

enum {
	MLX4_INLINE_ALIGN	= 64,
	MLX4_INLINE_SEG		= 1 << 31,
};

struct mlx4_wqe_inline_seg {
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp);
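
/*
 * Illustrative sketch (error handling omitted): a QP is moved between states
 * one transition at a time, e.g. INIT -> RTR with no optional parameters:
 *
 *	err = mlx4_qp_modify(dev, &mtt, MLX4_QP_STATE_INIT, MLX4_QP_STATE_RTR,
 *			     &context, 0, 0, &qp);
 */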
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context);
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state);
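
/*
 * Illustrative sketch: mlx4_qp_to_ready() walks the QP through the
 * RST -> INIT -> RTR -> RTS transitions via mlx4_qp_modify(); *qp_state is
 * updated with the last state reached, e.g.:
 *
 *	err = mlx4_qp_to_ready(dev, &mtt, &context, &qp, &qp_state);
 *	if (!err && qp_state == MLX4_QP_STATE_RTS)
 *		... the QP is ready to carry traffic ...
 */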
static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
{
	return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));
}
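
/*
 * Illustrative sketch: __mlx4_qp_lookup() maps a QP number (e.g. taken from a
 * completion or an asynchronous event) back to its struct mlx4_qp.  It is a
 * bare radix-tree lookup, so the caller must guarantee the QP cannot be freed
 * concurrently (the core driver takes its QP table lock around it), e.g.:
 *
 *	qp = __mlx4_qp_lookup(dev, qpn);
 *	if (qp)
 *		atomic_inc(&qp->refcount);
 */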
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);

#endif /* MLX4_QP_H */