/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/module.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "mlx5_ib.h"

/* not supported currently */
static int srq_signature;
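
/* Return a pointer to receive WQE number 'n' within the SRQ buffer. */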
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}
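
/*
 * Dispatch an asynchronous SRQ event (limit reached or catastrophic error)
 * reported by the mlx5 core to the consumer's event handler, if one is set.
 */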
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, int type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}
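
/*
 * Build the SRQ work queue for a userspace consumer: copy and validate the
 * create command from udata, pin the user buffer, build the physical address
 * (PAS) list and map the user doorbell record.
 */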
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_srq_attr *in,
			   struct ib_udata *udata, int buf_size)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd = {};
	size_t ucmdlen;
	int err;
	int npages;
	int page_shift;
	int ncont;
	u32 offset;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	ucmdlen = min(udata->inlen, sizeof(ucmd));

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	if (in->type == IB_SRQT_XRC) {
		err = get_srq_user_index(to_mucontext(pd->uobject->context),
					 &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->page_offset = offset;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type == IB_SRQT_XRC)
		in->user_index = uidx;

	return 0;

err_in:
	kvfree(in->pas);

err_umem:
	ib_umem_release(srq->umem);

	return err;
}
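
/*
 * Build the SRQ work queue for a kernel consumer: allocate the doorbell
 * record and WQ buffer, chain the WQEs into a free list and fill the PAS
 * list for the firmware command.
 */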
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_srq_attr *in, int buf_size)
{
	int err;
	int i;
	struct mlx5_wqe_srq_next_seg *next;

	err = mlx5_db_alloc(dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	if (mlx5_buf_alloc(dev->mdev, buf_size, 2 * PAGE_SIZE, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}

	/* Chain all WQEs into the initial free list. */
	srq->head = 0;
	srq->tail = srq->msrq.max - 1;
	srq->wqe_ctr = 0;
	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
	in->pas = mlx5_vzalloc(sizeof(*in->pas) * srq->buf.npages);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, in->pas);

	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
			    (unsigned long)(srq->msrq.max * sizeof(u64)));
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;

	in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type == IB_SRQT_XRC)
		in->user_index = MLX5_IB_DEFAULT_UIDX;

	return 0;

err_in:
	kvfree(in->pas);

err_buf:
	mlx5_buf_free(dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(dev->mdev, &srq->db);
	return err;
}
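
/* Release the resources of a user-mode SRQ: doorbell mapping and umem. */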
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
	mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	ib_umem_release(srq->umem);
}
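
/* Release the resources of a kernel-mode SRQ: wrid array, buffer and doorbell. */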
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kfree(srq->wrid);
	mlx5_buf_free(dev->mdev, &srq->buf);
	mlx5_db_free(dev->mdev, &srq->db);
}
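
/*
 * ib_create_srq handler: validate the requested size, compute the WQE
 * stride, build the work queue in user or kernel memory and issue the
 * CREATE_SRQ command to firmware.
 */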
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_srq *srq;
	int desc_size;
	int buf_size;
	int err;
	struct mlx5_srq_attr in = {0};
	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= max_srq_wqes) {
		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
			    init_attr->attr.max_wr,
			    max_srq_wqes);
		return ERR_PTR(-EINVAL);
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
		err = -EINVAL;
		goto err_srq;
	}
	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(size_t, 32, desc_size);
	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
		err = -EINVAL;
		goto err_srq;
	}
	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	if (buf_size < desc_size) {
		err = -EINVAL;
		goto err_srq;
	}
	in.type = init_attr->srq_type;

	if (pd->uobject)
		err = create_srq_user(pd, srq, &in, udata, buf_size);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size);

	if (err || !in.pas) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     pd->uobject ? "user" : "kernel", err);
		goto err_srq;
	}

	in.log_size = ilog2(srq->msrq.max);
	in.wqe_shift = srq->msrq.wqe_shift - 4;
	if (srq->wq_sig)
		in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
	if (init_attr->srq_type == IB_SRQT_XRC) {
		in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
		in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn;
	} else if (init_attr->srq_type == IB_SRQT_BASIC) {
		in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;
		in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
	}

	in.pd = to_mpd(pd)->pdn;
	in.db_record = srq->db.dma;
	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
	kvfree(in.pas);
	if (err) {
		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
		goto err_usr_kern_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			mlx5_ib_dbg(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_core:
	mlx5_core_destroy_srq(dev->mdev, &srq->msrq);

err_usr_kern_srq:
	if (pd->uobject)
		destroy_srq_user(pd, srq);
	else
		destroy_srq_kernel(dev, srq);

err_srq:
	kfree(srq);
	return ERR_PTR(err);
}
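
/* ib_modify_srq handler: only arming the SRQ limit is supported, not resize. */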
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs yet */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
		mutex_unlock(&srq->mutex);
		if (ret)
			return ret;
	}

	return 0;
}
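
/* ib_query_srq handler: query firmware and report srq_limit, max_wr and max_sge. */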
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	struct mlx5_srq_attr *out;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = out->lwm;
	srq_attr->max_wr = srq->msrq.max - 1;
	srq_attr->max_sge = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}
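
/* ib_destroy_srq handler: destroy the firmware object, then free user or kernel resources. */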
int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);

	mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);

	if (srq->uobject) {
		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		destroy_srq_kernel(dev, msrq);
	}

	kfree(srq);
	return 0;
}
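
/*
 * Return a consumed receive WQE to the SRQ free list by linking it behind
 * the current tail; invoked from the completion path with interrupts disabled.
 */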
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}
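
/*
 * ib_post_srq_recv handler: take WQEs off the free list, write the scatter
 * list for each work request and ring the doorbell record once at the end.
 */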
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			/* SRQ is full */
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}

out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}