/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/gfp.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/driver.h>

#include "mlx5_core.h"
37 static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
40 struct mlx5_qp_table *table = &dev->priv.qp_table;
41 struct mlx5_core_rsc_common *common;
43 spin_lock(&table->lock);
45 common = radix_tree_lookup(&table->tree, rsn);
47 atomic_inc(&common->refcount);
49 spin_unlock(&table->lock);
52 mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
59 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
61 if (atomic_dec_and_test(&common->refcount))
62 complete(&common->free);
65 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
67 struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
68 struct mlx5_core_qp *qp;
73 switch (common->res) {
75 qp = (struct mlx5_core_qp *)common;
76 qp->event(qp, event_type);
80 mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
83 mlx5_core_put_rsc(common);
86 static int create_qprqsq_common(struct mlx5_core_dev *dev,
87 struct mlx5_core_qp *qp, int rsc_type)
89 struct mlx5_qp_table *table = &dev->priv.qp_table;
92 qp->common.res = rsc_type;
94 spin_lock_irq(&table->lock);
95 err = radix_tree_insert(&table->tree, qp->qpn | (rsc_type << 24), qp);
96 spin_unlock_irq(&table->lock);
100 atomic_set(&qp->common.refcount, 1);
101 init_completion(&qp->common.free);
102 qp->pid = curthread->td_proc->p_pid;
107 static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
108 struct mlx5_core_qp *qp, int rsc_type)
110 struct mlx5_qp_table *table = &dev->priv.qp_table;
113 spin_lock_irqsave(&table->lock, flags);
114 radix_tree_delete(&table->tree, qp->qpn | (rsc_type << 24));
115 spin_unlock_irqrestore(&table->lock, flags);
117 mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
118 wait_for_completion(&qp->common.free);
121 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
122 struct mlx5_core_qp *qp,
123 struct mlx5_create_qp_mbox_in *in,
126 struct mlx5_create_qp_mbox_out out;
127 struct mlx5_destroy_qp_mbox_in din;
128 struct mlx5_destroy_qp_mbox_out dout;
131 memset(&out, 0, sizeof(out));
132 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
134 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
136 mlx5_core_warn(dev, "ret %d\n", err);
140 if (out.hdr.status) {
141 mlx5_core_warn(dev, "current num of QPs 0x%x\n",
142 atomic_read(&dev->num_qps));
143 return mlx5_cmd_status_to_err(&out.hdr);
146 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
147 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
149 err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
153 atomic_inc(&dev->num_qps);
158 memset(&din, 0, sizeof(din));
159 memset(&dout, 0, sizeof(dout));
160 din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
161 din.qpn = cpu_to_be32(qp->qpn);
162 mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
166 EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
168 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
169 struct mlx5_core_qp *qp)
171 struct mlx5_destroy_qp_mbox_in in;
172 struct mlx5_destroy_qp_mbox_out out;
176 destroy_qprqsq_common(dev, qp, MLX5_RES_QP);
178 memset(&in, 0, sizeof(in));
179 memset(&out, 0, sizeof(out));
180 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
181 in.qpn = cpu_to_be32(qp->qpn);
182 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
187 return mlx5_cmd_status_to_err(&out.hdr);
189 atomic_dec(&dev->num_qps);
192 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
194 int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
195 struct mlx5_modify_qp_mbox_in *in, int sqd_event,
196 struct mlx5_core_qp *qp)
198 struct mlx5_modify_qp_mbox_out out;
201 memset(&out, 0, sizeof(out));
202 in->hdr.opcode = cpu_to_be16(operation);
203 in->qpn = cpu_to_be32(qp->qpn);
204 err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
208 return mlx5_cmd_status_to_err(&out.hdr);
210 EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
212 void mlx5_init_qp_table(struct mlx5_core_dev *dev)
214 struct mlx5_qp_table *table = &dev->priv.qp_table;
216 memset(table, 0, sizeof(*table));
217 spin_lock_init(&table->lock);
218 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
/*
 * Tear down the QP table.  Intentionally empty: by the time this runs all
 * resources must already have been removed, and the radix tree itself
 * requires no explicit destruction.
 */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
}
225 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
226 struct mlx5_query_qp_mbox_out *out, int outlen)
228 struct mlx5_query_qp_mbox_in in;
231 memset(&in, 0, sizeof(in));
232 memset(out, 0, outlen);
233 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
234 in.qpn = cpu_to_be32(qp->qpn);
235 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
240 return mlx5_cmd_status_to_err(&out->hdr);
244 EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
246 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
248 u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)];
249 u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)];
252 memset(in, 0, sizeof(in));
254 MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
256 memset(out, 0, sizeof(out));
257 err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
261 *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
264 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
266 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
268 u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)];
269 u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)];
271 memset(in, 0, sizeof(in));
273 MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
274 MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
276 memset(out, 0, sizeof(out));
277 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
280 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
282 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
283 struct mlx5_core_dct *dct,
284 struct mlx5_create_dct_mbox_in *in)
286 struct mlx5_qp_table *table = &dev->priv.qp_table;
287 struct mlx5_create_dct_mbox_out out;
288 struct mlx5_destroy_dct_mbox_in din;
289 struct mlx5_destroy_dct_mbox_out dout;
292 init_completion(&dct->drained);
293 memset(&out, 0, sizeof(out));
294 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_DCT);
296 err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
298 mlx5_core_warn(dev, "create DCT failed, ret %d", err);
303 return mlx5_cmd_status_to_err(&out.hdr);
305 dct->dctn = be32_to_cpu(out.dctn) & 0xffffff;
307 dct->common.res = MLX5_RES_DCT;
308 spin_lock_irq(&table->lock);
309 err = radix_tree_insert(&table->tree, dct->dctn, dct);
310 spin_unlock_irq(&table->lock);
312 mlx5_core_warn(dev, "err %d", err);
316 dct->pid = curthread->td_proc->p_pid;
317 atomic_set(&dct->common.refcount, 1);
318 init_completion(&dct->common.free);
323 memset(&din, 0, sizeof(din));
324 memset(&dout, 0, sizeof(dout));
325 din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
326 din.dctn = cpu_to_be32(dct->dctn);
327 mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
331 EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
333 static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
334 struct mlx5_core_dct *dct)
336 struct mlx5_drain_dct_mbox_out out;
337 struct mlx5_drain_dct_mbox_in in;
340 memset(&in, 0, sizeof(in));
341 memset(&out, 0, sizeof(out));
342 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DRAIN_DCT);
343 in.dctn = cpu_to_be32(dct->dctn);
344 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
349 return mlx5_cmd_status_to_err(&out.hdr);
354 int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
355 struct mlx5_core_dct *dct)
357 struct mlx5_qp_table *table = &dev->priv.qp_table;
358 struct mlx5_destroy_dct_mbox_out out;
359 struct mlx5_destroy_dct_mbox_in in;
363 err = mlx5_core_drain_dct(dev, dct);
365 mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
369 wait_for_completion(&dct->drained);
371 spin_lock_irqsave(&table->lock, flags);
372 if (radix_tree_delete(&table->tree, dct->dctn) != dct)
373 mlx5_core_warn(dev, "dct delete differs\n");
374 spin_unlock_irqrestore(&table->lock, flags);
376 if (atomic_dec_and_test(&dct->common.refcount))
377 complete(&dct->common.free);
378 wait_for_completion(&dct->common.free);
380 memset(&in, 0, sizeof(in));
381 memset(&out, 0, sizeof(out));
382 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
383 in.dctn = cpu_to_be32(dct->dctn);
384 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
389 return mlx5_cmd_status_to_err(&out.hdr);
393 EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
395 int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
396 struct mlx5_query_dct_mbox_out *out)
398 struct mlx5_query_dct_mbox_in in;
401 memset(&in, 0, sizeof(in));
402 memset(out, 0, sizeof(*out));
403 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_DCT);
404 in.dctn = cpu_to_be32(dct->dctn);
405 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
410 return mlx5_cmd_status_to_err(&out->hdr);
414 EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
416 int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct)
418 struct mlx5_arm_dct_mbox_out out;
419 struct mlx5_arm_dct_mbox_in in;
422 memset(&in, 0, sizeof(in));
423 memset(&out, 0, sizeof(out));
425 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
426 in.dctn = cpu_to_be32(dct->dctn);
427 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
432 return mlx5_cmd_status_to_err(&out.hdr);
436 EXPORT_SYMBOL_GPL(mlx5_core_arm_dct);
438 int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
439 struct mlx5_core_qp *rq)
443 err = mlx5_core_create_rq(dev, in, inlen, &rq->qpn);
447 err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
449 mlx5_core_destroy_rq(dev, rq->qpn);
453 EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
455 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
456 struct mlx5_core_qp *rq)
458 destroy_qprqsq_common(dev, rq, MLX5_RES_RQ);
459 mlx5_core_destroy_rq(dev, rq->qpn);
461 EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
463 int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
464 struct mlx5_core_qp *sq)
468 err = mlx5_core_create_sq(dev, in, inlen, &sq->qpn);
472 err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
474 mlx5_core_destroy_sq(dev, sq->qpn);
478 EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
480 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
481 struct mlx5_core_qp *sq)
483 destroy_qprqsq_common(dev, sq, MLX5_RES_SQ);
484 mlx5_core_destroy_sq(dev, sq->qpn);
486 EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);