/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
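/*
 * Dispatch an asynchronous SRQ event from the EQ handler.  The SRQ is
 * looked up under the table lock and its reference count is bumped so
 * that it cannot be freed while the event callback runs.
 */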
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);
	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);
	spin_unlock(&table->lock);

	if (!srq) {
		mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}
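/*
 * Translate between an SRQ context (srqc) and an RMP context (rmpc).
 * When srqc_to_rmpc is true, the SRQ parameters are copied into the RMP
 * work queue fields; otherwise the RMP context is folded back into the
 * SRQ context layout that callers expect.
 */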
static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
{
	void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	if (srqc_to_rmpc) {
		switch (MLX5_GET(srqc, srqc, state)) {
		case MLX5_SRQC_STATE_GOOD:
			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
			break;
		case MLX5_SRQC_STATE_ERROR:
			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
			break;
		default:
			printf("mlx5_core: WARN: ""%s: %d: Unknown srq state = 0x%x\n", __func__, __LINE__, MLX5_GET(srqc, srqc, state));
			break;
		}

		MLX5_SET(wq, wq, wq_signature,  MLX5_GET(srqc, srqc, wq_signature));
		MLX5_SET(wq, wq, log_wq_pg_sz,  MLX5_GET(srqc, srqc, log_page_size));
		MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
		MLX5_SET(wq, wq, log_wq_sz,     MLX5_GET(srqc, srqc, log_srq_size));
		MLX5_SET(wq, wq, page_offset,   MLX5_GET(srqc, srqc, page_offset));
		MLX5_SET(wq, wq, lwm,           MLX5_GET(srqc, srqc, lwm));
		MLX5_SET(wq, wq, pd,            MLX5_GET(srqc, srqc, pd));
		MLX5_SET64(wq, wq, dbr_addr,
			   ((u64)MLX5_GET(srqc, srqc, db_record_addr_h)) << 32 |
			   ((u64)MLX5_GET(srqc, srqc, db_record_addr_l)) << 2);
	} else {
		switch (MLX5_GET(rmpc, rmpc, state)) {
		case MLX5_RMPC_STATE_RDY:
			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
			break;
		case MLX5_RMPC_STATE_ERR:
			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
			break;
		default:
			printf("mlx5_core: WARN: ""%s: %d: Unknown rmp state = 0x%x\n", __func__, __LINE__, MLX5_GET(rmpc, rmpc, state));
			break;
		}

		MLX5_SET(srqc, srqc, wq_signature,  MLX5_GET(wq, wq, wq_signature));
		MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
		MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
		MLX5_SET(srqc, srqc, log_srq_size,  MLX5_GET(wq, wq, log_wq_sz));
		MLX5_SET(srqc, srqc, page_offset,   MLX5_GET(wq, wq, page_offset));
		MLX5_SET(srqc, srqc, lwm,           MLX5_GET(wq, wq, lwm));
		MLX5_SET(srqc, srqc, pd,            MLX5_GET(wq, wq, pd));
		MLX5_SET(srqc, srqc, db_record_addr_h, MLX5_GET64(wq, wq, dbr_addr) >> 32);
		MLX5_SET(srqc, srqc, db_record_addr_l, (MLX5_GET64(wq, wq, dbr_addr) >> 2) & 0x3fffffff);
	}
}
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);
	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);
	spin_unlock(&table->lock);

	return srq;
}
EXPORT_SYMBOL(mlx5_core_get_srq);
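/*
 * Compute the size in bytes of the physical address (PAS) array that
 * follows the context: the number of pages spanned by the receive
 * queue, including the page offset, times one u64 per page address.
 */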
static int get_pas_size(void *srqc)
{
	u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
	u32 log_srq_size  = MLX5_GET(srqc, srqc, log_srq_size);
	u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
	u32 page_offset   = MLX5_GET(srqc, srqc, page_offset);
	u32 po_quanta     = 1 << (log_page_size - 6);
	u32 rq_sz         = 1 << (log_srq_size + 4 + log_rq_stride);
	u32 page_size     = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;

	return rq_num_pas * sizeof(u64);
}
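/*
 * On the new command interface the SRQ is backed by an RMP object:
 * build a create_rmp mailbox from the caller's create_srq input and
 * let the firmware return the RMP number, which is used as the SRQ
 * number from here on.
 */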
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_create_srq_mbox_in *in, int srq_inlen)
{
	void *create_in, *rmpc, *srqc;
	int pas_size, inlen, err;

	srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
	pas_size = get_pas_size(srqc);
	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
	create_in = mlx5_vzalloc(inlen);
	if (!create_in)
		return -ENOMEM;

	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);

	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
	rmpc_srqc_reformat(srqc, rmpc, true);

	err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);

	kvfree(create_in);
	return err;
}
static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_rmp(dev, srq->srqn);
}
static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_query_srq_mbox_out *out)
{
	void *rmp_out, *rmpc, *srqc;
	int err;

	rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
	if (!rmp_out)
		return -ENOMEM;

	err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
	if (err)
		goto out;

	srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
	rmpc_srqc_reformat(srqc, rmpc, false);

out:
	kvfree(rmp_out);
	return err;
}
static int arm_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm)
{
	return mlx5_core_arm_rmp(dev, srq->srqn, lwm);
}
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
			      struct mlx5_core_srq *srq,
			      struct mlx5_create_srq_mbox_in *in,
			      int srq_inlen)
{
	void *create_in, *srqc, *xrc_srqc, *pas;
	int pas_size, inlen, err;

	srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
	pas_size = get_pas_size(srqc);
	inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
	create_in = mlx5_vzalloc(inlen);
	if (!create_in)
		return -ENOMEM;

	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry);
	pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

	memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
	memcpy(pas, in->pas, pas_size);

	err = mlx5_core_create_xsrq(dev, create_in, inlen, &srq->srqn);

	kvfree(create_in);
	return err;
}
static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
			       struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_xsrq(dev, srq->srqn);
}
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_query_srq_mbox_out *out)
{
	u32 *xrcsrq_out;
	int err;

	xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
	if (!xrcsrq_out)
		return -ENOMEM;

	err = mlx5_core_query_xsrq(dev, srq->srqn, xrcsrq_out);

	kvfree(xrcsrq_out);
	return err;
}
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq, u16 lwm)
{
	return mlx5_core_arm_xsrq(dev, srq->srqn, lwm);
}
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_create_srq_mbox_in *in, int inlen)
{
	struct mlx5_create_srq_mbox_out out;
	int err;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);

	err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), sizeof(out));
	if (err)
		return err;

	srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
	return 0;
}
static int destroy_srq_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq)
{
	struct mlx5_destroy_srq_mbox_in in;
	struct mlx5_destroy_srq_mbox_out out;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
	in.srqn = cpu_to_be32(srq->srqn);

	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_query_srq_mbox_out *out)
{
	struct mlx5_query_srq_mbox_in in;

	memset(&in, 0, sizeof(in));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
	in.srqn = cpu_to_be32(srq->srqn);

	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)out, sizeof(*out));
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	struct mlx5_arm_srq_mbox_in in;
	struct mlx5_arm_srq_mbox_out out;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
	in.hdr.opmod = cpu_to_be16(!!is_srq);
	in.srqn = cpu_to_be32(srq->srqn);
	in.lwm = cpu_to_be16(lwm);

	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
}
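/*
 * The split helpers below pick the firmware object that implements the
 * SRQ: the legacy SRQ commands when the device uses the old command
 * interface, an XRC SRQ for MLX5_RES_XSRQ resources, and an RMP
 * otherwise.
 */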
static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			    struct mlx5_create_srq_mbox_in *in, int inlen,
			    int is_xrc)
{
	if (!dev->issi)
		return create_srq_cmd(dev, srq, in, inlen);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return create_xrc_srq_cmd(dev, srq, in, inlen);
	else
		return create_rmp_cmd(dev, srq, in, inlen);
}
static int destroy_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	if (!dev->issi)
		return destroy_srq_cmd(dev, srq);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return destroy_xrc_srq_cmd(dev, srq);
	else
		return destroy_rmp_cmd(dev, srq);
}
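/*
 * Public SRQ API: create the firmware object, initialize the reference
 * count and the free completion, and make the SRQ visible to the event
 * handler by inserting it into the radix tree.
 */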
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_create_srq_mbox_in *in, int inlen,
			 int is_xrc)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	int err;

	srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;

	err = create_srq_split(dev, srq, in, inlen, is_xrc);
	if (err)
		return err;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, srq->srqn, srq);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
		goto err_destroy_srq_split;
	}

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);
	return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, srq->srqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
		return -EINVAL;
	}
	if (tmp != srq) {
		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
		return -EINVAL;
	}

	err = destroy_srq_split(dev, srq);
	if (err)
		return err;

	/* drop the initial reference and wait for any event handlers to finish */
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_query_srq_mbox_out *out)
{
	if (!dev->issi)
		return query_srq_cmd(dev, srq, out);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return query_xrc_srq_cmd(dev, srq, out);
	else
		return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq)
{
	if (!dev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return arm_xrc_srq_cmd(dev, srq, lwm);
	else
		return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing to tear down; all SRQs must already be destroyed */
}