/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>

#include <dev/mlx5/driver.h>
#include <dev/mlx5/srq.h>

#include <rdma/ib_verbs.h>

#include "mlx5_core.h"
36 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
38 struct mlx5_srq_table *table = &dev->priv.srq_table;
39 struct mlx5_core_srq *srq;
41 spin_lock(&table->lock);
43 srq = radix_tree_lookup(&table->tree, srqn);
45 atomic_inc(&srq->refcount);
47 spin_unlock(&table->lock);
50 mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
54 srq->event(srq, event_type);
56 if (atomic_dec_and_test(&srq->refcount))
60 static void set_wq(void *wq, struct mlx5_srq_attr *in)
62 MLX5_SET(wq, wq, wq_signature, !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
63 MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
64 MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
65 MLX5_SET(wq, wq, log_wq_sz, in->log_size);
66 MLX5_SET(wq, wq, page_offset, in->page_offset);
67 MLX5_SET(wq, wq, lwm, in->lwm);
68 MLX5_SET(wq, wq, pd, in->pd);
69 MLX5_SET64(wq, wq, dbr_addr, in->db_record);
72 static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
74 MLX5_SET(srqc, srqc, wq_signature, !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
75 MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
76 MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
77 MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
78 MLX5_SET(srqc, srqc, page_offset, in->page_offset);
79 MLX5_SET(srqc, srqc, lwm, in->lwm);
80 MLX5_SET(srqc, srqc, pd, in->pd);
81 MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
82 MLX5_SET(srqc, srqc, xrcd, in->xrcd);
83 MLX5_SET(srqc, srqc, cqn, in->cqn);
86 static void get_wq(void *wq, struct mlx5_srq_attr *in)
88 if (MLX5_GET(wq, wq, wq_signature))
89 in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
90 in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
91 in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
92 in->log_size = MLX5_GET(wq, wq, log_wq_sz);
93 in->page_offset = MLX5_GET(wq, wq, page_offset);
94 in->lwm = MLX5_GET(wq, wq, lwm);
95 in->pd = MLX5_GET(wq, wq, pd);
96 in->db_record = MLX5_GET64(wq, wq, dbr_addr);
99 static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
101 if (MLX5_GET(srqc, srqc, wq_signature))
102 in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
103 in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
104 in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
105 in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
106 in->page_offset = MLX5_GET(srqc, srqc, page_offset);
107 in->lwm = MLX5_GET(srqc, srqc, lwm);
108 in->pd = MLX5_GET(srqc, srqc, pd);
109 in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
112 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
114 struct mlx5_srq_table *table = &dev->priv.srq_table;
115 struct mlx5_core_srq *srq;
117 spin_lock(&table->lock);
119 srq = radix_tree_lookup(&table->tree, srqn);
121 atomic_inc(&srq->refcount);
123 spin_unlock(&table->lock);
127 EXPORT_SYMBOL(mlx5_core_get_srq);
129 static int get_pas_size(struct mlx5_srq_attr *in)
131 u32 log_page_size = in->log_page_size + 12;
132 u32 log_srq_size = in->log_size;
133 u32 log_rq_stride = in->wqe_shift;
134 u32 page_offset = in->page_offset;
135 u32 po_quanta = 1 << (log_page_size - 6);
136 u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
137 u32 page_size = 1 << log_page_size;
138 u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
139 u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
141 return rq_num_pas * sizeof(u64);
145 static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
146 struct mlx5_srq_attr *in)
155 pas_size = get_pas_size(in);
156 inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
157 create_in = mlx5_vzalloc(inlen);
161 rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
162 wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
164 MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
166 memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
168 err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
174 static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
175 struct mlx5_core_srq *srq)
177 return mlx5_core_destroy_rmp(dev, srq->srqn);
180 static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
181 struct mlx5_srq_attr *out)
187 rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
191 err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
195 rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
196 get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
197 if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
198 out->flags |= MLX5_SRQ_FLAG_ERR;
205 static int arm_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm)
207 return mlx5_core_arm_rmp(dev, srq->srqn, lwm);
210 static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
211 struct mlx5_core_srq *srq,
212 struct mlx5_srq_attr *in)
221 pas_size = get_pas_size(in);
222 inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
223 create_in = mlx5_vzalloc(inlen);
227 xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry);
228 pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
230 set_srqc(xrc_srqc, in);
231 MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
232 memcpy(pas, in->pas, pas_size);
234 err = mlx5_core_create_xsrq(dev, create_in, inlen, &srq->srqn);
243 static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
244 struct mlx5_core_srq *srq)
246 return mlx5_core_destroy_xsrq(dev, srq->srqn);
249 static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
250 struct mlx5_core_srq *srq,
251 struct mlx5_srq_attr *out)
257 xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
261 err = mlx5_core_query_xsrq(dev, srq->srqn, xrcsrq_out);
265 xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
266 xrc_srq_context_entry);
267 get_srqc(xrc_srqc, out);
268 if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
269 out->flags |= MLX5_SRQ_FLAG_ERR;
276 static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
277 struct mlx5_core_srq *srq, u16 lwm)
279 return mlx5_core_arm_xsrq(dev, srq->srqn, lwm);
282 static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
283 struct mlx5_srq_attr *in)
285 u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
293 pas_size = get_pas_size(in);
294 inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
295 create_in = mlx5_vzalloc(inlen);
299 srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
300 pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
303 memcpy(pas, in->pas, pas_size);
305 MLX5_SET(create_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_SRQ);
306 err = mlx5_cmd_exec(dev, create_in, inlen, create_out, sizeof(create_out));
309 srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
314 static int destroy_srq_cmd(struct mlx5_core_dev *dev,
315 struct mlx5_core_srq *srq)
317 u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
318 u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
320 MLX5_SET(destroy_srq_in, srq_in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
321 MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
323 return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out));
326 static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
327 struct mlx5_srq_attr *out)
329 u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
332 int outlen = MLX5_ST_SZ_BYTES(query_srq_out);
335 srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
339 MLX5_SET(query_srq_in, srq_in, opcode, MLX5_CMD_OP_QUERY_SRQ);
340 MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
341 err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, outlen);
345 srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
347 if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
348 out->flags |= MLX5_SRQ_FLAG_ERR;
354 static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
357 /* arm_srq structs missing using identical xrc ones */
358 u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
359 u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
361 MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
362 MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
363 MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
365 return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out));
368 static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
369 struct mlx5_srq_attr *in)
372 return create_srq_cmd(dev, srq, in);
373 else if (srq->common.res == MLX5_RES_XSRQ)
374 return create_xrc_srq_cmd(dev, srq, in);
376 return create_rmp_cmd(dev, srq, in);
379 static int destroy_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
382 return destroy_srq_cmd(dev, srq);
383 else if (srq->common.res == MLX5_RES_XSRQ)
384 return destroy_xrc_srq_cmd(dev, srq);
386 return destroy_rmp_cmd(dev, srq);
389 int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
390 struct mlx5_srq_attr *in)
393 struct mlx5_srq_table *table = &dev->priv.srq_table;
395 if (in->type == IB_SRQT_XRC)
396 srq->common.res = MLX5_RES_XSRQ;
398 srq->common.res = MLX5_RES_SRQ;
400 err = create_srq_split(dev, srq, in);
404 atomic_set(&srq->refcount, 1);
405 init_completion(&srq->free);
407 spin_lock_irq(&table->lock);
408 err = radix_tree_insert(&table->tree, srq->srqn, srq);
409 spin_unlock_irq(&table->lock);
411 mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
412 goto err_destroy_srq_split;
417 err_destroy_srq_split:
418 destroy_srq_split(dev, srq);
422 EXPORT_SYMBOL(mlx5_core_create_srq);
424 int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
426 struct mlx5_srq_table *table = &dev->priv.srq_table;
427 struct mlx5_core_srq *tmp;
430 spin_lock_irq(&table->lock);
431 tmp = radix_tree_delete(&table->tree, srq->srqn);
432 spin_unlock_irq(&table->lock);
434 mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
438 mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
442 err = destroy_srq_split(dev, srq);
446 if (atomic_dec_and_test(&srq->refcount))
447 complete(&srq->free);
448 wait_for_completion(&srq->free);
452 EXPORT_SYMBOL(mlx5_core_destroy_srq);
454 int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
455 struct mlx5_srq_attr *out)
458 return query_srq_cmd(dev, srq, out);
459 else if (srq->common.res == MLX5_RES_XSRQ)
460 return query_xrc_srq_cmd(dev, srq, out);
462 return query_rmp_cmd(dev, srq, out);
464 EXPORT_SYMBOL(mlx5_core_query_srq);
466 int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
470 return arm_srq_cmd(dev, srq, lwm, is_srq);
471 else if (srq->common.res == MLX5_RES_XSRQ)
472 return arm_xrc_srq_cmd(dev, srq, lwm);
474 return arm_rmp_cmd(dev, srq, lwm);
476 EXPORT_SYMBOL(mlx5_core_arm_srq);
478 void mlx5_init_srq_table(struct mlx5_core_dev *dev)
480 struct mlx5_srq_table *table = &dev->priv.srq_table;
482 memset(table, 0, sizeof(*table));
483 spin_lock_init(&table->lock);
484 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
487 void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)