/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/module.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int srq_signature;

/* Each receive WQE is 2^wqe_shift bytes; return the WQE at index n. */
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, int type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_create_srq_mbox_in **in,
			   struct ib_udata *udata, int buf_size, int *inlen,
			   int type)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd = {};
	size_t ucmdlen;
	int err, npages, page_shift, ncont;
	u32 offset;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	ucmdlen = min(udata->inlen, sizeof(ucmd));

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	if (type == IB_SRQT_XRC) {
		err = get_srq_user_index(to_mucontext(pd->uobject->context),
					 &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!(*in)) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);

	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);

	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    type == IB_SRQT_XRC) {
		void *xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
					   xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
	}

	return 0;

err_in:
	kvfree(*in);
err_umem:
	ib_umem_release(srq->umem);
	return err;
}

static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_create_srq_mbox_in **in, int buf_size,
			     int *inlen, int type)
{
	struct mlx5_wqe_srq_next_seg *next;
	int err, i, page_shift, npages;

	err = mlx5_db_alloc(dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	if (mlx5_buf_alloc(dev->mdev, buf_size, 2 * PAGE_SIZE, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}
	page_shift = srq->buf.page_shift;

	srq->head    = 0;
	srq->tail    = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	/* Link every WQE to the next one through its next_wqe_index. */
	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
	mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
		    buf_size, page_shift, srq->buf.npages, npages);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, (*in)->pas);

	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
			    (unsigned long)(srq->msrq.max * sizeof(u64)));
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;

	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;

	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    type == IB_SRQT_XRC) {
		void *xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
					   xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xsrqc, user_index, MLX5_IB_DEFAULT_UIDX);
	}

	return 0;

err_in:
	kvfree(*in);
err_buf:
	mlx5_buf_free(dev->mdev, &srq->buf);
err_db:
	mlx5_db_free(dev->mdev, &srq->db);
	return err;
}

static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
	mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	ib_umem_release(srq->umem);
}

static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kfree(srq->wrid);
	mlx5_buf_free(dev->mdev, &srq->buf);
	mlx5_db_free(dev->mdev, &srq->db);
}
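
/*
 * SRQ creation: the requested max_wr/max_sge are turned into a
 * power-of-two number of fixed-size WQEs, the WQE buffer and doorbell
 * record are set up either from user memory (udata path) or from
 * kernel memory, and the filled-in mailbox is handed to
 * mlx5_core_create_srq().
 */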
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_srq *srq;
	int desc_size;
	int buf_size;
	int err;
	struct mlx5_create_srq_mbox_in *uninitialized_var(in);
	int uninitialized_var(inlen);
	int is_xrc;
	u32 flgs, xrcdn;
	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= max_srq_wqes) {
		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
			    init_attr->attr.max_wr,
			    max_srq_wqes);
		return ERR_PTR(-EINVAL);
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	/* A WQE is a next-segment header plus max_gs scatter entries,
	 * rounded up to a power of two of at least 32 bytes; the room
	 * gained by rounding is reported as max_avail_gather.
	 */
	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(int, 32, desc_size);
	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
		    srq->msrq.max_avail_gather);

	if (pd->uobject)
		err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
				      init_attr->srq_type);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
					init_attr->srq_type);
	if (err) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     pd->uobject ? "user" : "kernel", err);
		goto err_srq;
	}

	is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
	in->ctx.state_log_sz = ilog2(srq->msrq.max);
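
	/*
	 * The top byte of flags_xrcd packs the WQE stride as a log2 of
	 * 16-byte units (wqe_shift - 4), the XRC flag (bit 5) and the
	 * WQ signature flag (bit 7); the low 24 bits carry the XRCD
	 * number.
	 */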
	flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
	xrcdn = 0;
	if (is_xrc) {
		xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
		in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
	} else if (init_attr->srq_type == IB_SRQT_BASIC) {
		xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
		in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
	}
	in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));
	in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->ctx.db_record = cpu_to_be64(srq->db.dma);

	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
	kvfree(in);
	if (err) {
		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
		goto err_usr_kern_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			mlx5_ib_dbg(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_core:
	mlx5_core_destroy_srq(dev->mdev, &srq->msrq);
err_usr_kern_srq:
	if (pd->uobject)
		destroy_srq_user(pd, srq);
	else
		destroy_srq_kernel(dev, srq);
err_srq:
	kfree(srq);
	return ERR_PTR(err);
}

int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs yet */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
		mutex_unlock(&srq->mutex);
		if (ret)
			return ret;
	}

	return 0;
}

int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_query_srq_mbox_out *out;
	int ret;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}

int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);

	mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);

	if (srq->uobject) {
		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		destroy_srq_kernel(dev, msrq);
	}

	kfree(srq);
	return 0;
}
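
/*
 * Receive WQEs form a free list threaded through each WQE's
 * next_wqe_index: mlx5_ib_post_srq_recv() takes entries from srq->head,
 * and completed WQEs are linked back in at srq->tail here. When head
 * catches up with tail only the last free entry remains and the SRQ is
 * treated as full.
 */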
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			/* SRQ is full */
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
		}

		/* Terminate the scatter list if there is room left. */
		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}