/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/gfp.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/driver.h>

#include "mlx5_core.h"

#include "transobj.h"

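/*
 * Look up the resource behind an async event by its resource number and
 * take a reference on it under the table lock.  Returns NULL if no such
 * resource is currently registered.
 */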
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
                                                 u32 rsn)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_core_rsc_common *common;

        spin_lock(&table->lock);

        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
                atomic_inc(&common->refcount);

        spin_unlock(&table->lock);

        if (!common) {
                mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
                               rsn);
                return NULL;
        }
        return common;
}

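/* Drop a reference; the final put wakes up a waiter in the destroy path. */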
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
        if (atomic_dec_and_test(&common->refcount))
                complete(&common->free);
}

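/*
 * Dispatch a firmware async event to the resource it belongs to.  Only QP
 * events are forwarded to the owner's event handler; other resource types
 * are reported and ignored.
 */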
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
        struct mlx5_core_qp *qp;

        if (!common)
                return;

        switch (common->res) {
        case MLX5_RES_QP:
                qp = (struct mlx5_core_qp *)common;
                qp->event(qp, event_type);
                break;

        default:
                mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
        }

        mlx5_core_put_rsc(common);
}

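/*
 * Register a QP, RQ or SQ in the shared radix tree.  The key is the queue
 * number with the resource type encoded in the upper bits, so the three
 * number spaces cannot collide.
 */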
static int create_qprqsq_common(struct mlx5_core_dev *dev,
                                struct mlx5_core_qp *qp, int rsc_type)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        int err;

        qp->common.res = rsc_type;

        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, qp->qpn | (rsc_type << 24), qp);
        spin_unlock_irq(&table->lock);
        if (err)
                return err;

        atomic_set(&qp->common.refcount, 1);
        init_completion(&qp->common.free);
        qp->pid = curthread->td_proc->p_pid;

        return 0;
}

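/*
 * Remove a QP, RQ or SQ from the radix tree, drop the creation reference
 * and wait until all remaining references have been released.
 */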
static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *qp, int rsc_type)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        unsigned long flags;

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree, qp->qpn | (rsc_type << 24));
        spin_unlock_irqrestore(&table->lock, flags);

        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        wait_for_completion(&qp->common.free);
}

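/*
 * Create a QP in firmware and register it for async event dispatch.  If
 * the registration fails, the freshly created QP is destroyed again.
 */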
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        struct mlx5_create_qp_mbox_in *in,
                        int inlen)
{
        struct mlx5_create_qp_mbox_out out;
        struct mlx5_destroy_qp_mbox_in din;
        struct mlx5_destroy_qp_mbox_out dout;
        int err;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);

        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "ret %d\n", err);
                return err;
        }

        if (out.hdr.status) {
                mlx5_core_warn(dev, "current num of QPs 0x%x\n",
                               atomic_read(&dev->num_qps));
                return mlx5_cmd_status_to_err(&out.hdr);
        }

        qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
        mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

        err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
        if (err)
                goto err_cmd;

        atomic_inc(&dev->num_qps);

        return 0;

err_cmd:
        memset(&din, 0, sizeof(din));
        memset(&dout, 0, sizeof(dout));
        din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        din.qpn = cpu_to_be32(qp->qpn);
        mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

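/*
 * Unregister a QP, wait for outstanding references to drop and destroy it
 * in firmware.
 */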
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
                         struct mlx5_core_qp *qp)
{
        struct mlx5_destroy_qp_mbox_in in;
        struct mlx5_destroy_qp_mbox_out out;
        int err;

        destroy_qprqsq_common(dev, qp, MLX5_RES_QP);

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        atomic_dec(&dev->num_qps);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

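/* Execute a QP state transition command; the opcode is passed by the caller. */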
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
                        struct mlx5_modify_qp_mbox_in *in, int sqd_event,
                        struct mlx5_core_qp *qp)
{
        struct mlx5_modify_qp_mbox_out out;
        int err = 0;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(operation);
        in->qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (err)
                return err;

        return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

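/* Initialize the per-device table that tracks QPs, RQs, SQs and DCTs. */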
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                       struct mlx5_query_qp_mbox_out *out, int outlen)
{
        struct mlx5_query_qp_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, outlen);
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;

        if (out->hdr.status)
                return mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
        u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)];
        u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)];
        int err;

        memset(in, 0, sizeof(in));

        MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);

        memset(out, 0, sizeof(out));
        err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
        if (err)
                return err;

        *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
        u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)];
        u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)];

        memset(in, 0, sizeof(in));

        MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
        MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);

        memset(out, 0, sizeof(out));
        return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
                                          out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

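/*
 * Create a DCT in firmware and insert it into the resource tree.  If the
 * tree insertion fails, the DCT is destroyed again.
 */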
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
                         struct mlx5_core_dct *dct,
                         struct mlx5_create_dct_mbox_in *in)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_create_dct_mbox_out out;
        struct mlx5_destroy_dct_mbox_in din;
        struct mlx5_destroy_dct_mbox_out dout;
        int err;

        init_completion(&dct->drained);
        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_DCT);

        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
                return err;
        }

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        dct->dctn = be32_to_cpu(out.dctn) & 0xffffff;

        dct->common.res = MLX5_RES_DCT;
        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, dct->dctn, dct);
        spin_unlock_irq(&table->lock);
        if (err) {
                mlx5_core_warn(dev, "err %d\n", err);
                goto err_cmd;
        }

        dct->pid = curthread->td_proc->p_pid;
        atomic_set(&dct->common.refcount, 1);
        init_completion(&dct->common.free);

        return 0;

err_cmd:
        memset(&din, 0, sizeof(din));
        memset(&dout, 0, sizeof(dout));
        din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
        din.dctn = cpu_to_be32(dct->dctn);
        mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_dct);

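/* Issue the DRAIN_DCT firmware command for the given DCT. */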
static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
                               struct mlx5_core_dct *dct)
{
        struct mlx5_drain_dct_mbox_out out;
        struct mlx5_drain_dct_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DRAIN_DCT);
        in.dctn = cpu_to_be32(dct->dctn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        return 0;
}

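/*
 * Destroy a DCT: drain it, wait for the drain to finish, remove it from
 * the resource tree, wait for all references to drop and finally destroy
 * it in firmware.
 */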
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
                          struct mlx5_core_dct *dct)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_destroy_dct_mbox_out out;
        struct mlx5_destroy_dct_mbox_in in;
        unsigned long flags;
        int err;

        err = mlx5_core_drain_dct(dev, dct);
        if (err) {
                mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
                return err;
        }

        wait_for_completion(&dct->drained);

        spin_lock_irqsave(&table->lock, flags);
        if (radix_tree_delete(&table->tree, dct->dctn) != dct)
                mlx5_core_warn(dev, "dct delete differs\n");
        spin_unlock_irqrestore(&table->lock, flags);

        if (atomic_dec_and_test(&dct->common.refcount))
                complete(&dct->common.free);
        wait_for_completion(&dct->common.free);

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
        in.dctn = cpu_to_be32(dct->dctn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);

int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
                        struct mlx5_query_dct_mbox_out *out)
{
        struct mlx5_query_dct_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, sizeof(*out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_DCT);
        in.dctn = cpu_to_be32(dct->dctn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
        if (err)
                return err;

        if (out->hdr.status)
                return mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_dct_query);

int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct)
{
        struct mlx5_arm_dct_mbox_out out;
        struct mlx5_arm_dct_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));

        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
        in.dctn = cpu_to_be32(dct->dctn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_arm_dct);

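/*
 * "Tracked" RQ/SQ helpers: create or destroy the queue through the
 * transport object interface and keep it registered in the shared
 * resource tree alongside QPs.
 */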
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *rq)
{
        int err;

        err = mlx5_core_create_rq(dev, in, inlen, &rq->qpn);
        if (err)
                return err;

        err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
        if (err)
                mlx5_core_destroy_rq(dev, rq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *rq)
{
        destroy_qprqsq_common(dev, rq, MLX5_RES_RQ);
        mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *sq)
{
        int err;

        err = mlx5_core_create_sq(dev, in, inlen, &sq->qpn);
        if (err)
                return err;

        err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
        if (err)
                mlx5_core_destroy_sq(dev, sq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *sq)
{
        destroy_qprqsq_common(dev, sq, MLX5_RES_SQ);
        mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);