/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/gfp.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/driver.h>

#include "mlx5_core.h"
37 static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
40 struct mlx5_qp_table *table = &dev->priv.qp_table;
41 struct mlx5_core_rsc_common *common;
43 spin_lock(&table->lock);
45 common = radix_tree_lookup(&table->tree, rsn);
47 atomic_inc(&common->refcount);
49 spin_unlock(&table->lock);
52 mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
59 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
61 if (atomic_dec_and_test(&common->refcount))
62 complete(&common->free);
65 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
67 struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
68 struct mlx5_core_qp *qp;
73 switch (common->res) {
75 qp = (struct mlx5_core_qp *)common;
76 qp->event(qp, event_type);
80 mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
83 mlx5_core_put_rsc(common);
86 static int create_qprqsq_common(struct mlx5_core_dev *dev,
87 struct mlx5_core_qp *qp, int rsc_type)
89 struct mlx5_qp_table *table = &dev->priv.qp_table;
92 qp->common.res = rsc_type;
94 spin_lock_irq(&table->lock);
95 err = radix_tree_insert(&table->tree, qp->qpn | (rsc_type << 24), qp);
96 spin_unlock_irq(&table->lock);
100 atomic_set(&qp->common.refcount, 1);
101 init_completion(&qp->common.free);
102 qp->pid = curthread->td_proc->p_pid;
107 static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
108 struct mlx5_core_qp *qp, int rsc_type)
110 struct mlx5_qp_table *table = &dev->priv.qp_table;
113 spin_lock_irqsave(&table->lock, flags);
114 radix_tree_delete(&table->tree, qp->qpn | (rsc_type << 24));
115 spin_unlock_irqrestore(&table->lock, flags);
117 mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
118 wait_for_completion(&qp->common.free);
121 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
122 struct mlx5_core_qp *qp,
125 u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
126 u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
127 u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
130 MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
132 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
136 qp->qpn = MLX5_GET(create_qp_out, out, qpn);
137 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
139 err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
143 atomic_inc(&dev->num_qps);
148 MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
149 MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
150 mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
153 EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
155 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
156 struct mlx5_core_qp *qp)
158 u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
159 u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
163 destroy_qprqsq_common(dev, qp, MLX5_RES_QP);
165 MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
166 MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
167 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
171 atomic_dec(&dev->num_qps);
174 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
183 static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
186 mbox->outlen = outlen;
187 mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
188 mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
189 if (!mbox->in || !mbox->out) {
198 static void mbox_free(struct mbox_info *mbox)
204 static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
205 u32 opt_param_mask, void *qpc,
206 struct mbox_info *mbox)
211 #define MBOX_ALLOC(mbox, typ) \
212 mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
214 #define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
215 MLX5_SET(typ##_in, in, opcode, _opcode); \
216 MLX5_SET(typ##_in, in, qpn, _qpn)
217 #define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
218 MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
219 MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
220 memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
224 case MLX5_CMD_OP_2RST_QP:
225 if (MBOX_ALLOC(mbox, qp_2rst))
227 MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
229 case MLX5_CMD_OP_2ERR_QP:
230 if (MBOX_ALLOC(mbox, qp_2err))
232 MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
235 /* MODIFY with QPC */
236 case MLX5_CMD_OP_RST2INIT_QP:
237 if (MBOX_ALLOC(mbox, rst2init_qp))
239 MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
240 opt_param_mask, qpc);
242 case MLX5_CMD_OP_INIT2RTR_QP:
243 if (MBOX_ALLOC(mbox, init2rtr_qp))
245 MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
246 opt_param_mask, qpc);
248 case MLX5_CMD_OP_RTR2RTS_QP:
249 if (MBOX_ALLOC(mbox, rtr2rts_qp))
251 MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
252 opt_param_mask, qpc);
254 case MLX5_CMD_OP_RTS2RTS_QP:
255 if (MBOX_ALLOC(mbox, rts2rts_qp))
257 MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
258 opt_param_mask, qpc);
260 case MLX5_CMD_OP_SQERR2RTS_QP:
261 if (MBOX_ALLOC(mbox, sqerr2rts_qp))
263 MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
264 opt_param_mask, qpc);
266 case MLX5_CMD_OP_INIT2INIT_QP:
267 if (MBOX_ALLOC(mbox, init2init_qp))
269 MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
270 opt_param_mask, qpc);
273 mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
282 int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
283 u32 opt_param_mask, void *qpc,
284 struct mlx5_core_qp *qp)
286 struct mbox_info mbox;
289 err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
290 opt_param_mask, qpc, &mbox);
294 err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
298 EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
300 void mlx5_init_qp_table(struct mlx5_core_dev *dev)
302 struct mlx5_qp_table *table = &dev->priv.qp_table;
304 memset(table, 0, sizeof(*table));
305 spin_lock_init(&table->lock);
306 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
309 void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
313 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
314 u32 *out, int outlen)
316 u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
318 MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
319 MLX5_SET(query_qp_in, in, qpn, qp->qpn);
321 return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
323 EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
325 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
327 u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
328 u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
331 MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
332 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
334 *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
337 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
339 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
341 u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
342 u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
344 MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
345 MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
346 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
348 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
350 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
351 struct mlx5_core_dct *dct,
354 struct mlx5_qp_table *table = &dev->priv.qp_table;
355 u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
356 u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
357 u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
358 int inlen = MLX5_ST_SZ_BYTES(create_dct_in);
361 init_completion(&dct->drained);
362 MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
364 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
366 mlx5_core_warn(dev, "create DCT failed, ret %d", err);
370 dct->dctn = MLX5_GET(create_dct_out, out, dctn);
372 dct->common.res = MLX5_RES_DCT;
373 spin_lock_irq(&table->lock);
374 err = radix_tree_insert(&table->tree, dct->dctn, dct);
375 spin_unlock_irq(&table->lock);
377 mlx5_core_warn(dev, "err %d", err);
381 dct->pid = curthread->td_proc->p_pid;
382 atomic_set(&dct->common.refcount, 1);
383 init_completion(&dct->common.free);
388 MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
389 MLX5_SET(destroy_dct_in, din, dctn, dct->dctn);
390 mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
394 EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
396 static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
397 struct mlx5_core_dct *dct)
399 u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
400 u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
402 MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
403 MLX5_SET(drain_dct_in, in, dctn, dct->dctn);
404 return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
405 (void *)&out, sizeof(out));
408 int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
409 struct mlx5_core_dct *dct)
411 struct mlx5_qp_table *table = &dev->priv.qp_table;
412 u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
413 u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
417 err = mlx5_core_drain_dct(dev, dct);
419 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
422 mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
427 wait_for_completion(&dct->drained);
430 spin_lock_irqsave(&table->lock, flags);
431 if (radix_tree_delete(&table->tree, dct->dctn) != dct)
432 mlx5_core_warn(dev, "dct delete differs\n");
433 spin_unlock_irqrestore(&table->lock, flags);
435 if (atomic_dec_and_test(&dct->common.refcount))
436 complete(&dct->common.free);
437 wait_for_completion(&dct->common.free);
439 MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
440 MLX5_SET(destroy_dct_in, in, dctn, dct->dctn);
442 return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
443 (void *)&out, sizeof(out));
445 EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
447 int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
448 u32 *out, int outlen)
450 u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
452 MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
453 MLX5_SET(query_dct_in, in, dctn, dct->dctn);
455 return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
456 (void *)out, outlen);
458 EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
460 int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct)
462 u32 out[MLX5_ST_SZ_DW(arm_dct_out)] = {0};
463 u32 in[MLX5_ST_SZ_DW(arm_dct_in)] = {0};
465 MLX5_SET(arm_dct_in, in, opcode, MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
466 MLX5_SET(arm_dct_in, in, dctn, dct->dctn);
468 return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
469 (void *)&out, sizeof(out));
471 EXPORT_SYMBOL_GPL(mlx5_core_arm_dct);
473 int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
474 struct mlx5_core_qp *rq)
478 err = mlx5_core_create_rq(dev, in, inlen, &rq->qpn);
482 err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
484 mlx5_core_destroy_rq(dev, rq->qpn);
488 EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
490 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
491 struct mlx5_core_qp *rq)
493 destroy_qprqsq_common(dev, rq, MLX5_RES_RQ);
494 mlx5_core_destroy_rq(dev, rq->qpn);
496 EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
498 int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
499 struct mlx5_core_qp *sq)
503 err = mlx5_core_create_sq(dev, in, inlen, &sq->qpn);
507 err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
509 mlx5_core_destroy_sq(dev, sq->qpn);
513 EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
515 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
516 struct mlx5_core_qp *sq)
518 destroy_qprqsq_common(dev, sq, MLX5_RES_SQ);
519 mlx5_core_destroy_sq(dev, sq->qpn);
521 EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);