2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <linux/gfp.h>
30 #include <dev/mlx5/qp.h>
31 #include <dev/mlx5/driver.h>
33 #include "mlx5_core.h"
/*
 * mlx5_get_rsc() - look up the resource tracked under @rsn in the
 * device's QP radix tree and take a reference on it.
 *
 * Runs under table->lock; the reference taken here is dropped by
 * mlx5_core_put_rsc().
 *
 * NOTE(review): the embedded line numbers skip values, so the NULL
 * check on the lookup result and the return statement are elided from
 * this excerpt; presumably the warn below fires when the lookup fails.
 */
35 static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
38 struct mlx5_qp_table *table = &dev->priv.qp_table;
39 struct mlx5_core_rsc_common *common;
41 spin_lock(&table->lock);
43 common = radix_tree_lookup(&table->tree, rsn);
/* NOTE(review): refcount is incremented without a visible NULL check
 * on 'common' in this excerpt — confirm the elided line 44 guards it. */
45 atomic_inc(&common->refcount);
47 spin_unlock(&table->lock);
50 mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
/*
 * mlx5_core_put_rsc() - drop a reference taken by mlx5_get_rsc().
 *
 * When the last reference goes away, complete ->free so that a waiter
 * in the destroy path (wait_for_completion) can proceed.
 */
57 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
59 if (atomic_dec_and_test(&common->refcount))
60 complete(&common->free);
/*
 * mlx5_rsc_event() - dispatch an async hardware event for resource @rsn.
 *
 * Looks up the resource (taking a reference), forwards the event to the
 * QP's registered ->event() callback when the resource is a QP, and
 * drops the reference on exit.
 *
 * NOTE(review): the case labels / default of the switch and the early
 * return on a failed lookup are elided from this excerpt (line numbers
 * skip values) — the visible warn presumably sits in the default arm.
 */
63 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
65 struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
66 struct mlx5_core_qp *qp;
71 switch (common->res) {
73 qp = (struct mlx5_core_qp *)common;
74 qp->event(qp, event_type);
78 mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
/* Balance the reference taken by mlx5_get_rsc() above. */
81 mlx5_core_put_rsc(common);
/*
 * mlx5_core_create_qp() - fire the CREATE_QP firmware command, record
 * the new QP in the per-device radix tree, and initialize its refcount
 * and free-completion for later teardown.
 *
 * Returns 0 on success or a negative errno; on command-status failure
 * the firmware status is translated via mlx5_cmd_status_to_err().
 *
 * NOTE(review): interior lines are elided from this excerpt (the
 * embedded line numbers skip values) — in particular the 'int inlen'
 * parameter used at the mlx5_cmd_exec() call, the local declarations of
 * 'qpc'/'err', the error-path goto labels, and the closing returns.
 */
84 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
85 struct mlx5_core_qp *qp,
86 struct mlx5_create_qp_mbox_in *in,
89 struct mlx5_qp_table *table = &dev->priv.qp_table;
90 struct mlx5_create_qp_mbox_out out;
91 struct mlx5_destroy_qp_mbox_in din;
92 struct mlx5_destroy_qp_mbox_out dout;
96 memset(&out, 0, sizeof(out));
97 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
99 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
100 /* 0xffffff means we ask to work with cqe version 0 */
101 MLX5_SET(qpc, qpc, user_index, 0xffffff);
104 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
106 mlx5_core_warn(dev, "ret %d\n", err);
110 if (out.hdr.status) {
111 mlx5_core_warn(dev, "current num of QPs 0x%x\n",
112 atomic_read(&dev->num_qps));
113 return mlx5_cmd_status_to_err(&out.hdr);
/* QP number lives in the low 24 bits of the returned dword. */
116 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
117 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
119 qp->common.res = MLX5_RES_QP;
120 spin_lock_irq(&table->lock);
121 err = radix_tree_insert(&table->tree, qp->qpn, qp);
122 spin_unlock_irq(&table->lock);
124 mlx5_core_warn(dev, "err %d\n", err);
128 qp->pid = curthread->td_proc->p_pid;
/* One reference held by the table entry; released in destroy. */
129 atomic_set(&qp->common.refcount, 1);
130 atomic_inc(&dev->num_qps);
131 init_completion(&qp->common.free);
/* Error unwind: destroy the just-created QP in firmware. */
136 memset(&din, 0, sizeof(din));
137 memset(&dout, 0, sizeof(dout));
138 din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
139 din.qpn = cpu_to_be32(qp->qpn);
/* BUG(review): passes '&out' (the CREATE out-mailbox) with
 * 'sizeof(dout)' (the DESTROY out-mailbox size) — mismatched
 * buffer/size pair. Upstream Linux fixed this to use '&dout'. */
140 mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
144 EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
/*
 * mlx5_core_destroy_qp() - remove @qp from the tracking tree, wait for
 * all outstanding references (e.g. in-flight async events) to drain,
 * then issue the DESTROY_QP firmware command.
 *
 * Returns 0 on success or a translated firmware/command error.
 *
 * NOTE(review): interior lines (local 'err'/'flags' declarations,
 * braces, the error check after mlx5_cmd_exec, and the final return)
 * are elided from this excerpt.
 */
146 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
147 struct mlx5_core_qp *qp)
149 struct mlx5_destroy_qp_mbox_in in;
150 struct mlx5_destroy_qp_mbox_out out;
151 struct mlx5_qp_table *table = &dev->priv.qp_table;
156 spin_lock_irqsave(&table->lock, flags);
157 radix_tree_delete(&table->tree, qp->qpn);
158 spin_unlock_irqrestore(&table->lock, flags);
/* Drop the table's reference, then block until every other holder
 * (async event handlers) has called mlx5_core_put_rsc(). */
160 mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
161 wait_for_completion(&qp->common.free);
163 memset(&in, 0, sizeof(in));
164 memset(&out, 0, sizeof(out));
165 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
166 in.qpn = cpu_to_be32(qp->qpn);
167 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
172 return mlx5_cmd_status_to_err(&out.hdr);
174 atomic_dec(&dev->num_qps);
177 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
/*
 * mlx5_core_qp_modify() - transition a QP between states via the
 * appropriate firmware opcode.
 *
 * The optab matrix maps (current state, new state) to a MODIFY-family
 * command opcode; a zero entry means the transition is not supported
 * and the function rejects it (the return for that case is elided from
 * this excerpt).
 *
 * NOTE(review): 'sqd_event' is accepted but not used in any visible
 * line; local 'op'/'err' declarations, braces, and some returns are
 * elided (embedded line numbers skip values).
 */
179 int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
180 enum mlx5_qp_state new_state,
181 struct mlx5_modify_qp_mbox_in *in, int sqd_event,
182 struct mlx5_core_qp *qp)
/* [cur_state][new_state] -> firmware opcode; 0 = illegal transition. */
184 static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
185 [MLX5_QP_STATE_RST] = {
186 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
187 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
188 [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP,
190 [MLX5_QP_STATE_INIT] = {
191 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
192 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
193 [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP,
194 [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP,
196 [MLX5_QP_STATE_RTR] = {
197 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
198 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
199 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP,
201 [MLX5_QP_STATE_RTS] = {
202 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
203 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
204 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP,
206 [MLX5_QP_STATE_SQD] = {
207 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
208 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
210 [MLX5_QP_STATE_SQER] = {
211 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
212 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
213 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP,
215 [MLX5_QP_STATE_ERR] = {
216 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
217 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
221 struct mlx5_modify_qp_mbox_out out;
/* Bounds-check both states before indexing, and reject transitions
 * with no opcode mapping. */
225 if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
226 !optab[cur_state][new_state])
229 memset(&out, 0, sizeof(out));
230 op = optab[cur_state][new_state];
231 in->hdr.opcode = cpu_to_be16(op);
232 in->qpn = cpu_to_be32(qp->qpn);
233 err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
237 return mlx5_cmd_status_to_err(&out.hdr);
239 EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
/*
 * mlx5_init_qp_table() - initialize the per-device QP tracking table:
 * its spinlock and the radix tree (GFP_ATOMIC so insertions may happen
 * in atomic context).
 */
241 void mlx5_init_qp_table(struct mlx5_core_dev *dev)
243 struct mlx5_qp_table *table = &dev->priv.qp_table;
245 spin_lock_init(&table->lock);
246 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
/*
 * mlx5_cleanup_qp_table() - teardown counterpart of
 * mlx5_init_qp_table(). NOTE(review): the function body is entirely
 * elided from this excerpt (line numbers jump from 249 to 253); it may
 * be intentionally empty — confirm against the full file.
 */
249 void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
/*
 * mlx5_core_qp_query() - issue QUERY_QP for @qp, filling the
 * caller-provided @out mailbox of size @outlen.
 *
 * Returns 0 on success or a translated command/status error.
 *
 * NOTE(review): local 'err' declaration, braces, the post-exec error
 * check, and the final 'return 0' are elided from this excerpt.
 */
253 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
254 struct mlx5_query_qp_mbox_out *out, int outlen)
256 struct mlx5_query_qp_mbox_in in;
259 memset(&in, 0, sizeof(in));
260 memset(out, 0, outlen);
261 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
262 in.qpn = cpu_to_be32(qp->qpn);
263 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
268 return mlx5_cmd_status_to_err(&out->hdr);
272 EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
/*
 * mlx5_core_xrcd_alloc() - allocate an XRC domain in firmware and
 * return its number through @xrcdn.
 *
 * Returns 0 on success or the error from
 * mlx5_cmd_exec_check_status().
 *
 * NOTE(review): the error check between the exec and the MLX5_GET,
 * plus the final return, are elided from this excerpt.
 */
274 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
276 u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)];
277 u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)];
280 memset(in, 0, sizeof(in));
282 MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
284 memset(out, 0, sizeof(out));
285 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
289 *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
292 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
/*
 * mlx5_core_xrcd_dealloc() - release XRC domain @xrcdn in firmware.
 *
 * Returns the result of mlx5_cmd_exec_check_status() directly (the
 * second half of the argument list at the final call is elided from
 * this excerpt).
 */
294 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
296 u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)];
297 u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)];
299 memset(in, 0, sizeof(in));
301 MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
302 MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
304 memset(out, 0, sizeof(out));
305 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
308 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);