/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <dev/mlx4/cq.h>
#include <dev/mlx4/qp.h>
#include <dev/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"
/* Which firmware version adds support for Resize CQ */
#define MLX4_FW_VER_RESIZE_CQ		mlx4_fw_ver(2, 5, 0)
#define MLX4_FW_VER_IGNORE_OVERRUN_CQ	mlx4_fw_ver(2, 7, 8200)
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}
static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d "
			"on CQ %06x\n", type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}
static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}
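/*
 * A CQE belongs to software when its ownership bit matches the current
 * pass over the CQ ring (the expected bit flips each time the consumer
 * index wraps).  With 64-byte CQEs the hardware writes the ownership
 * byte in the second half of the entry, so the check is done on cqe + 1
 * in that case.
 */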
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}
int mlx4_ib_modify_cq(struct ib_cq *cq,
		      struct ib_cq_attr *cq_attr,
		      int cq_attr_mask)
{
	int err = 0;
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	if (cq_attr_mask & IB_CQ_CAP_FLAGS) {
		if (cq_attr->cq_cap_flags & IB_CQ_TIMESTAMP)
			return -ENOTSUPP;

		if (cq_attr->cq_cap_flags & IB_CQ_IGNORE_OVERRUN) {
			if (dev->dev->caps.cq_flags & MLX4_DEV_CAP_CQ_FLAG_IO)
				err = mlx4_cq_ignore_overrun(dev->dev, &mcq->mcq);
			else
				err = -ENOSYS;
		}
	}

	if (!err)
		if (cq_attr_mask & IB_CQ_MODERATION)
			err = mlx4_cq_modify(dev->dev, &mcq->mcq,
					     cq_attr->moderation.cq_count,
					     cq_attr->moderation.cq_period);

	return err;
}
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);
out:
	return err;
}
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;
	int shift;
	int n;

	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	n = ib_umem_page_count(*umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}
/* we don't support system timestamping */
#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_TIMESTAMP
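/*
 * Create a CQ.  For a userspace CQ the buffer and doorbell record come
 * from the caller via udata and are pinned with ib_umem; for a kernel CQ
 * they are allocated here.  The CQ is then handed to firmware through
 * mlx4_cq_alloc() on the requested completion vector.
 */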
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;
	int entries = attr->cqe;
	int vector = attr->comp_vector;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0,
			    !!(cq->create_flags & IB_CQ_TIMESTAMP));
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}
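/*
 * Copy the CQEs that are still outstanding in the old buffer into the
 * resize buffer, stopping at the special RESIZE CQE written by hardware.
 * The ownership bit of each copied entry is recomputed for its position
 * in the new ring.
 */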
static int mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	u32 i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;
	struct mlx4_cqe *start_cqe;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	start_cqe = cqe;
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		if (cqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n", cq->mcq.cqn);
			return -ENOMEM;
		}
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;

	return 0;
}
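/*
 * Resize flow: check firmware support, allocate the new buffer (a umem
 * for user CQs, a kernel buffer otherwise), issue the resize command to
 * firmware, and then swap the buffers.  For kernel CQs the outstanding
 * CQEs are copied into the new buffer under cq->lock, unless the poll
 * path has already completed the switch.
 */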
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	if (dev->dev->caps.fw_ver < MLX4_FW_VER_RESIZE_CQ)
		return -ENOSYS;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			err = mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}
int mlx4_ib_ignore_overrun_cq(struct ib_cq *ibcq)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);

	if (dev->dev->caps.fw_ver < MLX4_FW_VER_IGNORE_OVERRUN_CQ)
		return -ENOSYS;

	return mlx4_cq_ignore_overrun(dev->dev, &cq->mcq);
}
int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}
static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err "
			 "(QPN %06x, WQE index %x, vendor syndrome %02x, "
			 "opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}
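/*
 * The hardware checksum is trusted only for plain IPv4 packets (not
 * fragmented, no IP options, not IPv6) whose IP header checksum was
 * verified, carrying TCP or UDP, and whose full ones-complement
 * checksum equals 0xffff.
 */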
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}
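/*
 * For SR-IOV, special QP traffic is tunneled through proxy QPs and the
 * real completion metadata (pkey index, source QP, GRH flag, SL/VLAN,
 * SMAC) travels in a mlx4_ib_proxy_sqp_hdr prepended to the receive
 * buffer.  Recover that metadata into the work completion.
 */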
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			   unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
	} else {
		wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}

	return 0;
}
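/*
 * Poll one CQE.  Returns 0 when a completion was written to *wc,
 * -EAGAIN when the CQ is empty, and -EINVAL on malformed entries.
 * *cur_qp caches the QP of the previous CQE so that consecutive
 * completions for the same QP skip the QP table lookup.
 */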
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;
	int timestamp_en = !!(cq->create_flags & IB_CQ_TIMESTAMP);

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		pr_warn("Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
				cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;

		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num       = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
		if (unlikely(!msrq)) {
			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
				cq->mcq.cqn, srq_num);
			return -EINVAL;
		}
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		tail = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
				return use_tunnel_data
					(*cur_qp, cq, wc, tail, cqe,
					 rdma_port_get_link_layer
						(wc->qp->device,
						 (*cur_qp)->port) ==
						 IB_LINK_LAYER_ETHERNET);
		}

		if (timestamp_en) {
			/* currently, only CQ_CREATE_WITH_TIMESTAMPING_RAW is
			 * supported. CQ_CREATE_WITH_TIMESTAMPING_SYS isn't
			 * supported */
			if (cq->create_flags & IB_CQ_TIMESTAMP_TO_SYS_TIME) {
				wc->ts.timestamp = 0;
			} else {
				wc->ts.timestamp =
					((u64)(be32_to_cpu(cqe->timestamp_16_47)
					       + !cqe->timestamp_0_15) << 16)
					| be16_to_cpu(cqe->timestamp_0_15);
				wc->wc_flags |= IB_WC_WITH_TIMESTAMP;
			}
		} else {
			wc->wc_flags |= IB_WC_WITH_SLID;
			wc->slid = be16_to_cpu(cqe->rlid);
		}

		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
				cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;

		if (rdma_port_get_link_layer(wc->qp->device,
					     (*cur_qp)->port) ==
		    IB_LINK_LAYER_ETHERNET)
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
		else
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
		wc->wc_flags |= IB_WC_WITH_SL;

		if ((be32_to_cpu(cqe->vlan_my_qpn) &
		     MLX4_CQE_VLAN_PRESENT_MASK) && !timestamp_en) {
			wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
				MLX4_CQE_VID_MASK;
			wc->wc_flags |= IB_WC_WITH_VLAN;
		} else {
			wc->vlan_id = 0xffff;
		}

		memcpy(wc->smac, cqe->smac, 6);
		wc->wc_flags |= IB_WC_WITH_SMAC;
	}

	return 0;
}
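/*
 * Poll up to num_entries completions while holding cq->lock, then update
 * the consumer index doorbell record once for the whole batch.
 */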
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->priv_uar.map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}
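/*
 * Remove all CQEs that belong to the given QP (called while the QP is
 * being destroyed and is already in RESET), compacting the remaining
 * entries and releasing any SRQ WQEs they reference.  The caller must
 * hold cq->lock; mlx4_ib_cq_clean() below is the locking wrapper.
 */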
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}