/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/gfp.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/driver.h>

#include "mlx5_core.h"

#include "transobj.h"

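/*
 * Look up a resource (QP, RQ or SQ) in the device's resource radix tree by
 * its resource number and take a reference on it.  Returns NULL if the
 * resource is not found, e.g. when an async event races with teardown.
 */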
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
                                                 u32 rsn)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_core_rsc_common *common;

        spin_lock(&table->lock);

        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
                atomic_inc(&common->refcount);

        spin_unlock(&table->lock);

        if (!common) {
                mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
                               rsn);
                return NULL;
        }
        return common;
}

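/*
 * Drop a reference taken by mlx5_get_rsc() or create_qprqsq_common(); the
 * last put signals the completion that the destroy paths wait on.
 */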
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
        if (atomic_dec_and_test(&common->refcount))
                complete(&common->free);
}

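/*
 * Dispatch an asynchronous firmware event to the resource it targets.
 * Currently only QP events are forwarded to the owner's event callback.
 */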
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
        struct mlx5_core_qp *qp;

        if (!common)
                return;

        switch (common->res) {
        case MLX5_RES_QP:
                qp = (struct mlx5_core_qp *)common;
                qp->event(qp, event_type);
                break;

        default:
                mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
        }

        mlx5_core_put_rsc(common);
}

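/*
 * Register a newly created QP, RQ or SQ in the resource radix tree.  The
 * key combines the resource number with the resource type so that QPs,
 * RQs and SQs with the same number do not collide.
 */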
static int create_qprqsq_common(struct mlx5_core_dev *dev,
                                struct mlx5_core_qp *qp, int rsc_type)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        int err;

        qp->common.res = rsc_type;

        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, qp->qpn | (rsc_type << 24), qp);
        spin_unlock_irq(&table->lock);
        if (err)
                return err;

        atomic_set(&qp->common.refcount, 1);
        init_completion(&qp->common.free);
        qp->pid = curthread->td_proc->p_pid;

        return 0;
}

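/*
 * Remove a QP, RQ or SQ from the resource radix tree, drop the initial
 * reference and wait until all remaining references are gone before the
 * caller tears the resource down.
 */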
static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *qp, int rsc_type)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        unsigned long flags;

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree, qp->qpn | (rsc_type << 24));
        spin_unlock_irqrestore(&table->lock, flags);

        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        wait_for_completion(&qp->common.free);
}

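/*
 * Create a QP through the CREATE_QP firmware command and register it in
 * the resource table.  If registration fails after the firmware object was
 * created, the QP is destroyed again before the error is returned.
 *
 * Illustrative call sequence (sketch only; mailbox allocation and error
 * handling omitted):
 *
 *      err = mlx5_core_create_qp(dev, &qp, in, inlen);
 *      ...
 *      err = mlx5_core_destroy_qp(dev, &qp);
 */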
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        struct mlx5_create_qp_mbox_in *in,
                        int inlen)
{
        struct mlx5_create_qp_mbox_out out;
        struct mlx5_destroy_qp_mbox_in din;
        struct mlx5_destroy_qp_mbox_out dout;
        int err;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);

        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "ret %d\n", err);
                return err;
        }

        if (out.hdr.status) {
                mlx5_core_warn(dev, "current num of QPs 0x%x\n",
                               atomic_read(&dev->num_qps));
                return mlx5_cmd_status_to_err(&out.hdr);
        }

        qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
        mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

        err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
        if (err)
                goto err_cmd;

        atomic_inc(&dev->num_qps);

        return 0;

err_cmd:
        memset(&din, 0, sizeof(din));
        memset(&dout, 0, sizeof(dout));
        din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        din.qpn = cpu_to_be32(qp->qpn);
        mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

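/*
 * Unregister a QP from the resource table, wait for outstanding references
 * to drain and destroy the firmware object through DESTROY_QP.
 */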
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
                         struct mlx5_core_qp *qp)
{
        struct mlx5_destroy_qp_mbox_in in;
        struct mlx5_destroy_qp_mbox_out out;
        int err;

        destroy_qprqsq_common(dev, qp, MLX5_RES_QP);

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        atomic_dec(&dev->num_qps);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

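/*
 * Transition a QP between states by issuing the state-specific modify
 * opcode (RST2INIT, INIT2RTR, RTR2RTS and so on) supplied by the caller.
 */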
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
                        struct mlx5_modify_qp_mbox_in *in, int sqd_event,
                        struct mlx5_core_qp *qp)
{
        struct mlx5_modify_qp_mbox_out out;
        int err = 0;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(operation);
        in->qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (err)
                return err;

        return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

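/* Initialize the per-device resource table used to track QPs, RQs and SQs. */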
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;

        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

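/*
 * Nothing to release here; table entries are removed as the individual
 * resources are destroyed.
 */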
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
}

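/* Query the current context of a QP from firmware via QUERY_QP. */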
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                       struct mlx5_query_qp_mbox_out *out, int outlen)
{
        struct mlx5_query_qp_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, outlen);
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;

        if (out->hdr.status)
                return mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

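/* Allocate an XRC domain and return its number in *xrcdn. */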
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
        u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)];
        u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)];
        int err;

        memset(in, 0, sizeof(in));

        MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);

        memset(out, 0, sizeof(out));
        err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
        if (err)
                return err;

        *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

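/* Release an XRC domain previously allocated by mlx5_core_xrcd_alloc(). */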
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
        u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)];
        u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)];

        memset(in, 0, sizeof(in));

        MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
        MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);

        memset(out, 0, sizeof(out));
        return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
                                          out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

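/*
 * Create a DC target through the CREATE_DCT firmware command and register
 * it in the resource table.  On a registration failure the DCT is destroyed
 * again before the error is returned.
 */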
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
                         struct mlx5_core_dct *dct,
                         struct mlx5_create_dct_mbox_in *in)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_create_dct_mbox_out out;
        struct mlx5_destroy_dct_mbox_in din;
        struct mlx5_destroy_dct_mbox_out dout;
        int err;

        init_completion(&dct->drained);
        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_DCT);

        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
                return err;
        }

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        dct->dctn = be32_to_cpu(out.dctn) & 0xffffff;

        dct->common.res = MLX5_RES_DCT;
        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, dct->dctn, dct);
        spin_unlock_irq(&table->lock);
        if (err) {
                mlx5_core_warn(dev, "err %d\n", err);
                goto err_cmd;
        }

        dct->pid = curthread->td_proc->p_pid;
        atomic_set(&dct->common.refcount, 1);
        init_completion(&dct->common.free);

        return 0;

err_cmd:
        memset(&din, 0, sizeof(din));
        memset(&dout, 0, sizeof(dout));
        din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
        din.dctn = cpu_to_be32(dct->dctn);
        mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_dct);

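/*
 * Issue the DRAIN_DCT firmware command; mlx5_core_destroy_dct() waits for
 * the DCT's "drained" completion before the object is actually destroyed.
 */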
static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
                               struct mlx5_core_dct *dct)
{
        struct mlx5_drain_dct_mbox_out out;
        struct mlx5_drain_dct_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DRAIN_DCT);
        in.dctn = cpu_to_be32(dct->dctn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        return 0;
}

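/*
 * Drain the DCT, wait for it to quiesce, unregister it from the resource
 * table and wait for outstanding references before destroying the firmware
 * object through DESTROY_DCT.
 */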
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
                          struct mlx5_core_dct *dct)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_destroy_dct_mbox_out out;
        struct mlx5_destroy_dct_mbox_in in;
        unsigned long flags;
        int err;

        err = mlx5_core_drain_dct(dev, dct);
        if (err) {
                mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
                return err;
        }

        wait_for_completion(&dct->drained);

        spin_lock_irqsave(&table->lock, flags);
        if (radix_tree_delete(&table->tree, dct->dctn) != dct)
                mlx5_core_warn(dev, "dct delete differs\n");
        spin_unlock_irqrestore(&table->lock, flags);

        if (atomic_dec_and_test(&dct->common.refcount))
                complete(&dct->common.free);
        wait_for_completion(&dct->common.free);

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
        in.dctn = cpu_to_be32(dct->dctn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);

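/* Query the current context of a DCT from firmware via QUERY_DCT. */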
int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
                        struct mlx5_query_dct_mbox_out *out)
{
        struct mlx5_query_dct_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, sizeof(*out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_DCT);
        in.dctn = cpu_to_be32(dct->dctn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
        if (err)
                return err;

        if (out->hdr.status)
                return mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_dct_query);

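/* Re-arm a DCT so that it reports the next DC access key violation. */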
int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct)
{
        struct mlx5_arm_dct_mbox_out out;
        struct mlx5_arm_dct_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));

        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
        in.dctn = cpu_to_be32(dct->dctn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_arm_dct);

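/*
 * The "tracked" RQ/SQ helpers below wrap the transport-object create and
 * destroy calls from transobj.h and additionally register the queue in the
 * resource table, so that async events and reference counting work the same
 * way as for QPs.
 */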
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *rq)
{
        int err;

        err = mlx5_core_create_rq(dev, in, inlen, &rq->qpn);
        if (err)
                return err;

        err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
        if (err)
                mlx5_core_destroy_rq(dev, rq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *rq)
{
        destroy_qprqsq_common(dev, rq, MLX5_RES_RQ);
        mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *sq)
{
        int err;

        err = mlx5_core_create_sq(dev, in, inlen, &sq->qpn);
        if (err)
                return err;

        err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
        if (err)
                mlx5_core_destroy_sq(dev, sq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *sq)
{
        destroy_qprqsq_common(dev, sq, MLX5_RES_SQ);
        mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);