/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/module.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "mlx5_ib.h"
/* not supported currently */
static int srq_signature;
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}
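
/*
 * Illustrative note (not from the original source): wqe_shift is the log2
 * of the per-WQE stride, so the shift above is a multiply. For example,
 * with a 64-byte descriptor (wqe_shift == 6), WQE n starts at byte offset
 * n * 64 within the SRQ buffer.
 */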
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, int type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			printf("mlx5_ib: WARN: Unexpected event type %d on SRQ %06x\n",
			       type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}
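
/*
 * Note: the mlx5 core layer invokes the handler above for asynchronous SRQ
 * events; it is installed as srq->msrq.event in mlx5_ib_create_srq() below
 * and simply translates device event codes into IB verbs events.
 */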
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_create_srq_mbox_in **in,
			   struct ib_udata *udata, int buf_size, int *inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd;
	size_t ucmdlen;
	void *xsrqc;
	int err, npages, page_shift, ncont;
	u32 offset;
	int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);

	/* Guard the signed/unsigned comparison below against a short udata. */
	if (drv_data < 0)
		return -EINVAL;

	ucmdlen = (drv_data < sizeof(ucmd)) ?
		  drv_data : sizeof(ucmd);

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_err(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmdlen == sizeof(ucmd) &&
	    ucmd.reserved1 != 0) {
		mlx5_ib_warn(dev, "corrupted ucmd\n");
		return -EINVAL;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_warn(dev, "failed umem get, size %d\n", buf_size);
		return PTR_ERR(srq->umem);
	}

	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!(*in)) {
		mlx5_ib_err(dev, "failed allocate mbox\n");
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);

	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "map doorbell failed\n");
		goto err_in;
	}

	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);

	if (MLX5_CAP_GEN(dev->mdev, cqe_version)) {
		xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
				     xrc_srq_context_entry);
		/* 0xffffff means we ask to work with cqe version 0 */
		if (drv_data > offsetof(struct mlx5_ib_create_srq, uidx))
			MLX5_SET(xrc_srqc, xsrqc, user_index, ucmd.uidx);
		else
			MLX5_SET(xrc_srqc, xsrqc, user_index, 0xffffff);
	}

	return 0;

err_in:
	kvfree(*in);

err_umem:
	ib_umem_release(srq->umem);
	return err;
}
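
/*
 * For orientation (hedged; the authoritative layout lives in the mlx5 ABI
 * header): the ucmd consumed above carries at least buf_addr (the userspace
 * WQ buffer, pinned via ib_umem_get()), db_addr (the userspace doorbell
 * record, mapped via mlx5_ib_db_map_user()), flags (e.g.
 * MLX5_SRQ_FLAG_SIGNATURE), a reserved1 field that must be zero, and uidx
 * (forwarded to the xrc_srqc user_index when cqe_version is enabled).
 */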
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_create_srq_mbox_in **in, int buf_size,
			     int *inlen)
{
	struct mlx5_wqe_srq_next_seg *next;
	void *xsrqc;
	int page_shift;
	int err;
	int i;

	err = mlx5_db_alloc(dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
		mlx5_ib_err(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}
	page_shift = srq->buf.page_shift;

	srq->head    = 0;
	srq->tail    = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	/* Chain all WQEs into a circular free list. */
	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * srq->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!(*in)) {
		mlx5_ib_err(dev, "failed allocate mbox\n");
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, (*in)->pas);

	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;

	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;

	if (MLX5_CAP_GEN(dev->mdev, cqe_version)) {
		xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
				     xrc_srq_context_entry);
		/* 0xffffff means we ask to work with cqe version 0 */
		MLX5_SET(xrc_srqc, xsrqc, user_index, 0xffffff);
	}

	return 0;

err_in:
	kvfree(*in);

err_buf:
	mlx5_buf_free(dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(dev->mdev, &srq->db);
	return err;
}
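
/*
 * Worked example (illustrative): with msrq.max == 4, the loop above links
 * the empty ring as 0 -> 1 -> 2 -> 3 -> 0, with head == 0 and tail == 3.
 * mlx5_ib_post_srq_recv() consumes entries from head by following
 * next_wqe_index, and mlx5_ib_free_srq_wqe() chains completed indices back
 * in at tail; head == tail is treated as "SRQ full" (one slot is kept back
 * as a sentinel), which is why callers see a capacity of msrq.max - 1.
 */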
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
	mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	ib_umem_release(srq->umem);
}
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kfree(srq->wrid);
	mlx5_buf_free(dev->mdev, &srq->buf);
	mlx5_db_free(dev->mdev, &srq->db);
}
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_srq *srq;
	int desc_size;
	int buf_size;
	int err;
	struct mlx5_create_srq_mbox_in *uninitialized_var(in);
	int uninitialized_var(inlen);
	int is_xrc;
	u32 flgs, xrcdn;
	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= max_srq_wqes) {
		mlx5_ib_warn(dev, "max_wr %d, cap %d\n",
			     init_attr->attr.max_wr,
			     max_srq_wqes);
		return ERR_PTR(-EINVAL);
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);
	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(int, 32, desc_size);
	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
		    srq->msrq.max_avail_gather);
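
	/*
	 * Worked example (illustrative, assuming the usual 16-byte next and
	 * data segments): max_sge == 2 gives 16 + 2 * 16 = 48 bytes, rounded
	 * up to a 64-byte power-of-two stride, so wqe_shift == 6 and
	 * max_avail_gather == (64 - 16) / 16 == 3, i.e. one more scatter
	 * entry than requested becomes available per WQE.
	 */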
	if (pd->uobject)
		err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);

	if (err) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     pd->uobject ? "user" : "kernel", err);
		goto err_srq;
	}

	is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
	in->ctx.state_log_sz = ilog2(srq->msrq.max);
	flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;

	if (is_xrc) {
		xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
		in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
	} else if (init_attr->srq_type == IB_SRQT_BASIC) {
		xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
		in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
	}

	in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));
	in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->ctx.db_record = cpu_to_be64(srq->db.dma);

	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
	kvfree(in);
	if (err) {
		mlx5_ib_warn(dev, "create SRQ failed, err %d\n", err);
		goto err_usr_kern_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (pd->uobject) {
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			mlx5_ib_err(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}
	}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_core:
	mlx5_core_destroy_srq(dev->mdev, &srq->msrq);

err_usr_kern_srq:
	if (pd->uobject)
		destroy_srq_user(pd, srq);
	else
		destroy_srq_kernel(dev, srq);

err_srq:
	kfree(srq);
	return ERR_PTR(err);
}
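
/*
 * Usage sketch (illustrative, not part of this file): a kernel consumer
 * reaches the entry point above through the verbs layer, e.g.:
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.attr = { .max_wr = 1000, .max_sge = 2 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init_attr);
 *
 * On success, init_attr.attr.max_wr reflects the rounded-up capacity minus
 * one, as set above.
 */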
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs yet */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}
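
/*
 * Note: arming via mlx5_core_arm_srq() sets the limit watermark; once the
 * number of posted-but-unconsumed WQEs drops below srq_limit, the device
 * raises MLX5_EVENT_TYPE_SRQ_RQ_LIMIT, which mlx5_ib_srq_event() above
 * reports to the consumer as IB_EVENT_SRQ_LIMIT_REACHED.
 */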
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_query_srq_mbox_out *out;
	int ret;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}
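
/*
 * Usage sketch (illustrative): callers typically arrive here through
 * ib_query_srq(), e.g. to poll the current limit watermark:
 *
 *	struct ib_srq_attr attr;
 *	if (!ib_query_srq(srq, &attr))
 *		printf("srq limit %u, max_wr %u\n", attr.srq_limit, attr.max_wr);
 */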
int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);

	mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);

	if (srq->uobject) {
		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		destroy_srq_kernel(dev, msrq);
	}

	kfree(msrq);
	return 0;
}
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}
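
/*
 * Illustrative note: this is the producer side of the free list built in
 * create_srq_kernel(). When a completion for wqe_index is polled, the entry
 * is chained after the current tail (tail -> wqe_index) so that
 * mlx5_ib_post_srq_recv() below can hand it out again.
 */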
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			/* SRQ is full */
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		/* Terminate a short scatter list with an invalid-lkey entry. */
		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}

out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}
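
/*
 * Usage sketch (illustrative): posting a single 1-SGE receive through the
 * verbs layer, which lands in the function above:
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	struct ib_recv_wr wr = { .wr_id = cookie, .sg_list = &sge, .num_sge = 1 };
 *	struct ib_recv_wr *bad_wr;
 *	if (ib_post_srq_recv(srq, &wr, &bad_wr))
 *		...;	// bad_wr points at the first rejected WR
 *
 * dma_addr, len, lkey and cookie are placeholders for caller state.
 */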