/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/module.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "mlx5_ib.h"

/* not supported currently */
static int srq_signature;
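
/* Return a pointer to the WQE with index n; WQEs are 1 << wqe_shift bytes apart. */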
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
        return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}
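
/*
 * Dispatch an asynchronous SRQ event from the core driver to the consumer's
 * event handler, translating the hardware event code into an ib_event.
 */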
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, int type)
{
        struct ib_event event;
        struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

        if (ibsrq->event_handler) {
                event.device      = ibsrq->device;
                event.element.srq = ibsrq;
                switch (type) {
                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                        event.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        break;
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        event.event = IB_EVENT_SRQ_ERR;
                        break;
                default:
                        pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
                                type, srq->srqn);
                        return;
                }

                ibsrq->event_handler(&event, ibsrq->srq_context);
        }
}
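
/*
 * Create an SRQ whose work queue lives in user memory: validate the
 * copied-in command, pin the user buffer, build the physical address list
 * handed to firmware, and map the user doorbell record.
 */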
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
                           struct mlx5_srq_attr *in,
                           struct ib_udata *udata, int buf_size)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_create_srq ucmd = {};
        size_t ucmdlen;
        int err, npages, page_shift, ncont;
        u32 offset;
        u32 uidx = MLX5_IB_DEFAULT_UIDX;

        ucmdlen = min(udata->inlen, sizeof(ucmd));

        if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
                mlx5_ib_dbg(dev, "failed copy udata\n");
                return -EFAULT;
        }

        if (ucmd.reserved0 || ucmd.reserved1)
                return -EINVAL;

        if (udata->inlen > sizeof(ucmd) &&
            !ib_is_udata_cleared(udata, sizeof(ucmd),
                                 udata->inlen - sizeof(ucmd)))
                return -EINVAL;

        if (in->type == IB_SRQT_XRC) {
                err = get_srq_user_index(to_mucontext(pd->uobject->context),
                                         &ucmd, udata->inlen, &uidx);
                if (err)
                        return err;
        }

        srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

        srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
                                0, 0);
        if (IS_ERR(srq->umem)) {
                mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
                err = PTR_ERR(srq->umem);
                return err;
        }

        mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
                           &page_shift, &ncont, NULL);
        err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
                                     &offset);
        if (err) {
                mlx5_ib_warn(dev, "bad offset\n");
                goto err_umem;
        }

        in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
        if (!in->pas) {
                err = -ENOMEM;
                goto err_umem;
        }

        mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

        err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
                                  ucmd.db_addr, &srq->db);
        if (err) {
                mlx5_ib_dbg(dev, "map doorbell failed\n");
                goto err_in;
        }

        in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        in->page_offset = offset;
        if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
            in->type == IB_SRQT_XRC)
                in->user_index = uidx;

        return 0;

err_in:
        kvfree(in->pas);
err_umem:
        ib_umem_release(srq->umem);
        return err;
}
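
/*
 * Create an SRQ whose work queue lives in kernel memory, and thread every
 * WQE onto the free list by pointing each entry's next_wqe_index at its
 * successor.
 */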
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
                             struct mlx5_srq_attr *in, int buf_size)
{
        struct mlx5_wqe_srq_next_seg *next;
        int err, i, page_shift, npages;

        err = mlx5_db_alloc(dev->mdev, &srq->db);
        if (err) {
                mlx5_ib_warn(dev, "alloc dbell rec failed\n");
                return err;
        }

        if (mlx5_buf_alloc(dev->mdev, buf_size, 2 * PAGE_SIZE, &srq->buf)) {
                mlx5_ib_dbg(dev, "buf alloc failed\n");
                err = -ENOMEM;
                goto err_db;
        }
        page_shift = srq->buf.page_shift;

        srq->head    = 0;
        srq->tail    = srq->msrq.max - 1;
        srq->wqe_ctr = 0;

        for (i = 0; i < srq->msrq.max; i++) {
                next = get_wqe(srq, i);
                next->next_wqe_index =
                        cpu_to_be16((i + 1) & (srq->msrq.max - 1));
        }

        npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
        mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
                    buf_size, page_shift, srq->buf.npages, npages);
        in->pas = mlx5_vzalloc(sizeof(*in->pas) * npages);
        if (!in->pas) {
                err = -ENOMEM;
                goto err_buf;
        }
        mlx5_fill_page_array(&srq->buf, in->pas);

        srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
        if (!srq->wrid) {
                mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
                            (unsigned long)(srq->msrq.max * sizeof(u64)));
                err = -ENOMEM;
                goto err_in;
        }
        srq->wq_sig = !!srq_signature;

        in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
            in->type == IB_SRQT_XRC)
                in->user_index = MLX5_IB_DEFAULT_UIDX;

        return 0;

err_in:
        kvfree(in->pas);
err_buf:
        mlx5_buf_free(dev->mdev, &srq->buf);
err_db:
        mlx5_db_free(dev->mdev, &srq->db);
        return err;
}
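
/* Tear-down helpers mirroring create_srq_user() and create_srq_kernel(). */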
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
        mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
        ib_umem_release(srq->umem);
}

static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
        kfree(srq->wrid);
        mlx5_buf_free(dev->mdev, &srq->buf);
        mlx5_db_free(dev->mdev, &srq->db);
}
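
/*
 * ib_create_srq handler: size the work queue, allocate it in user or kernel
 * memory, then create the hardware object via mlx5_core_create_srq().  One
 * WQE is kept unused so a full queue (head == tail) is distinguishable from
 * an empty free list, hence max_wr is reported back as msrq.max - 1.
 */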
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
                                  struct ib_srq_init_attr *init_attr,
                                  struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_srq *srq;
        int desc_size;
        int buf_size;
        int err;
        struct mlx5_srq_attr in = {0};
        __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

        /* Sanity check SRQ size before proceeding */
        if (init_attr->attr.max_wr >= max_srq_wqes) {
                mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
                            init_attr->attr.max_wr,
                            max_srq_wqes);
                return ERR_PTR(-EINVAL);
        }

        srq = kmalloc(sizeof(*srq), GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        mutex_init(&srq->mutex);
        spin_lock_init(&srq->lock);
        srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
        srq->msrq.max_gs = init_attr->attr.max_sge;

        desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
                    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
        desc_size = roundup_pow_of_two(desc_size);
        desc_size = max_t(int, 32, desc_size);
        srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        srq->msrq.wqe_shift = ilog2(desc_size);
        buf_size = srq->msrq.max * desc_size;
        mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
                    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
                    srq->msrq.max_avail_gather);

        if (pd->uobject)
                err = create_srq_user(pd, srq, &in, udata, buf_size);
        else
                err = create_srq_kernel(dev, srq, &in, buf_size);

        if (err) {
                mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
                             pd->uobject ? "user" : "kernel", err);
                goto err_srq;
        }

        in.type = init_attr->srq_type;
        in.log_size = ilog2(srq->msrq.max);
        in.wqe_shift = srq->msrq.wqe_shift - 4;
        if (srq->wq_sig)
                in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
        if (init_attr->srq_type == IB_SRQT_XRC) {
                in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
                in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn;
        } else if (init_attr->srq_type == IB_SRQT_BASIC) {
                in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;
                in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
        }

        in.pd = to_mpd(pd)->pdn;
        in.db_record = srq->db.dma;
        err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
        kvfree(in.pas);
        if (err) {
                mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
                goto err_usr_kern_srq;
        }

        mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

        srq->msrq.event = mlx5_ib_srq_event;
        srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

        if (pd->uobject)
                if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
                        mlx5_ib_dbg(dev, "copy to user failed\n");
                        err = -EFAULT;
                        goto err_core;
                }

        init_attr->attr.max_wr = srq->msrq.max - 1;

        return &srq->ibsrq;

err_core:
        mlx5_core_destroy_srq(dev->mdev, &srq->msrq);

err_usr_kern_srq:
        if (pd->uobject)
                destroy_srq_user(pd, srq);
        else
                destroy_srq_kernel(dev, srq);

err_srq:
        kfree(srq);
        return ERR_PTR(err);
}
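
/* Only arming the SRQ limit is supported; resizing an SRQ is not. */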
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        int ret;

        /* We don't support resizing SRQs yet */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                if (attr->srq_limit >= srq->msrq.max)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
                mutex_unlock(&srq->mutex);

                if (ret)
                        return ret;
        }

        return 0;
}
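
/* Query the current SRQ attributes (limit watermark, depth, max SGEs). */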
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        int ret;
        struct mlx5_srq_attr *out;

        out = kzalloc(sizeof(*out), GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
        if (ret)
                goto out_box;

        srq_attr->srq_limit = out->lwm;
        srq_attr->max_wr    = srq->msrq.max - 1;
        srq_attr->max_sge   = srq->msrq.max_gs;

out_box:
        kfree(out);
        return ret;
}
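
/* Destroy the hardware SRQ, then release its user or kernel resources. */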
int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
        struct mlx5_ib_dev *dev = to_mdev(srq->device);
        struct mlx5_ib_srq *msrq = to_msrq(srq);

        mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);

        if (srq->uobject) {
                mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
                ib_umem_release(msrq->umem);
        } else {
                destroy_srq_kernel(dev, msrq);
        }

        kfree(srq);
        return 0;
}
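
/*
 * Return a completed WQE to the free list: the old tail is linked to the
 * freed entry, and the freed entry becomes the new tail.
 */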
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
        struct mlx5_wqe_srq_next_seg *next;

        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        next = get_wqe(srq, srq->tail);
        next->next_wqe_index = cpu_to_be16(wqe_index);
        srq->tail = wqe_index;

        spin_unlock(&srq->lock);
}
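
/*
 * Post a chain of receive work requests to the SRQ.  Free entries are
 * consumed from srq->head; after all WRs are written, a write barrier
 * orders the WQE stores before the doorbell record update.
 *
 * Illustrative consumer-side sketch (assumed variable names, not part of
 * this driver): a ULP reaches this entry point through the verbs layer:
 *
 *      struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *      struct ib_recv_wr wr = { .wr_id = cookie, .sg_list = &sge, .num_sge = 1 };
 *      struct ib_recv_wr *bad_wr;
 *      int ret = ib_post_srq_recv(ibsrq, &wr, &bad_wr);
 */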
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                          struct ib_recv_wr **bad_wr)
{
        struct mlx5_ib_srq *srq = to_msrq(ibsrq);
        struct mlx5_wqe_srq_next_seg *next;
        struct mlx5_wqe_data_seg *scat;
        struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx5_core_dev *mdev = dev->mdev;
        unsigned long flags;
        int err = 0;
        int nreq;
        int i;

        spin_lock_irqsave(&srq->lock, flags);

        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                err = -EIO;
                *bad_wr = wr;
                goto out;
        }

        for (nreq = 0; wr; nreq++, wr = wr->next) {
                if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely(srq->head == srq->tail)) {
                        /* SRQ is full */
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                srq->wrid[srq->head] = wr->wr_id;

                next      = get_wqe(srq, srq->head);
                srq->head = be16_to_cpu(next->next_wqe_index);
                scat      = (struct mlx5_wqe_data_seg *)(next + 1);

                for (i = 0; i < wr->num_sge; i++) {
                        scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
                        scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
                        scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
                }

                if (i < srq->msrq.max_avail_gather) {
                        scat[i].byte_count = 0;
                        scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
                        scat[i].addr       = 0;
                }
        }

        if (likely(nreq)) {
                srq->wqe_ctr += nreq;

                /* Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                *srq->db.db = cpu_to_be32(srq->wqe_ctr);
        }
out:
        spin_unlock_irqrestore(&srq->lock, flags);

        return err;
}