/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>

#include "mlx4_ib.h"
#include "user.h"
/* Which firmware version adds support for Resize CQ */
#define MLX4_FW_VER_RESIZE_CQ mlx4_fw_ver(2, 5, 0)
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}
static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
		       "on CQ %06x\n", type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}
static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);

	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}
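
/*
 * Ownership convention used by the helpers above: the CQ buffer holds
 * ibcq.cqe + 1 entries (a power of two) and cons_index is a free-running
 * counter, so bit (ibcq.cqe + 1) of an index gives the "lap" through the
 * ring that the index belongs to.  A CQE is treated as software-owned
 * only when its owner bit matches that lap parity; otherwise get_sw_cqe()
 * returns NULL, which is how the poll loop detects an empty CQ.
 */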
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}
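
/*
 * cq_count/cq_period program completion-event moderation: roughly, an
 * event is generated after cq_count completions or cq_period microseconds,
 * whichever comes first.  The values are passed through unchanged to
 * mlx4_cq_modify() in mlx4_core, which issues the firmware command.
 */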
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
		      &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;

	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		mlx4_ib_dbg("invalid num of entries: %d", entries);
		return ERR_PTR(-EINVAL);
	}

	cq = kzalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);
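
	/*
	 * One CQE is kept as a spacer, so the usable depth reported in
	 * ibcq.cqe is one less than the (power of two) number of entries
	 * actually allocated below.
	 */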
	entries = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db = cq->db.db;
		cq->mcq.arm_db    = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}
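
	/*
	 * IB_CQ_VECTOR_LEAST_ATTACHED asks the driver to pick the completion
	 * vector (EQ) itself instead of using a caller-specified one;
	 * MLX4_LEAST_ATTACHED_VECTOR is the corresponding mlx4_core encoding,
	 * nominally the EQ with the fewest CQs already attached to it.
	 */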
	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq,
			    vector == IB_CQ_VECTOR_LEAST_ATTACHED ?
			    MLX4_LEAST_ATTACHED_VECTOR : vector, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
		++i;

	return i - cq->mcq.cons_index;
}
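
/*
 * The count above is simply how far software could advance cons_index
 * before running out of software-owned CQEs; it bounds how small the CQ
 * may be made while unpolled completions are still queued in it.
 */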
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
	}
	++cq->mcq.cons_index;
}
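
/*
 * The copy loop above runs once the hardware has posted the special
 * MLX4_CQE_OPCODE_RESIZE entry: each CQE still outstanding at index i in
 * the old buffer is copied to index i + 1 in the new buffer, with its
 * owner bit rewritten to the new ring's lap parity so that get_sw_cqe()
 * keeps treating it as software-owned.  The final cons_index increment
 * steps past the RESIZE marker itself.
 */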
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	if (dev->dev->caps.fw_ver < MLX4_FW_VER_RESIZE_CQ)
		return -ENOSYS;

	mutex_lock(&cq->resize_mutex);

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = 0;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}
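
/*
 * For a user CQ the switch to the new buffer is made as soon as the
 * firmware command succeeds; synchronizing with CQEs still queued in the
 * old ring is left to the userspace provider library.  For a kernel CQ
 * the old buffer stays current until mlx4_ib_cq_resize_copy_cqes() (above,
 * under cq->lock) or mlx4_ib_poll_one() sees the RESIZE CQE, so
 * tmp_buf/tmp_cqe are used to free the old buffer outside the lock.
 */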
int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}
static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
	       be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
	       be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
	       be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		printk(KERN_DEBUG "local QP operation err "
		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
		       "opcode = %02x)\n",
		       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
		       cqe->vendor_err_syndrome,
		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4    |
				      MLX4_CQE_STATUS_IPV4F   |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6    |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}
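
/*
 * A receive checksum is reported as good only for plain IPv4 (IPOK set
 * and none of the fragment, IP-options or IPv6 status bits set) carrying
 * TCP or UDP, with the hardware-computed checksum equal to 0xffff.
 */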
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq;
	int is_send;
	int is_error;
	int is_xrc_recv = 0;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}
	if ((be32_to_cpu(cqe->vlan_my_qpn) & (1 << 23)) && !is_send) {
		/*
		 * We do not have to take the XRC SRQ table lock here,
		 * because CQs will be locked while XRC SRQs are removed
		 * from the table.
		 */
		msrq = __mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
					 be32_to_cpu(cqe->g_mlpath_rqpn) &
					 0xffffff);
		if (unlikely(!msrq)) {
			printk(KERN_WARNING "CQ %06x with entry for unknown "
			       "XRC SRQ %06x\n", cq->mcq.cqn,
			       be32_to_cpu(cqe->g_mlpath_rqpn) & 0xffffff);
			return -EINVAL;
		}
		is_xrc_recv = 1;
		srq = to_mibsrq(msrq);
	} else if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = is_xrc_recv ? NULL : &(*cur_qp)->ibqp;
	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if (is_xrc_recv) {
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;
	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid	   = be16_to_cpu(cqe->rlid);
		wc->sl		   = be16_to_cpu(cqe->sl_vid) >> 12;
		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index	   = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->csum_ok	   = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
	}

	return 0;
}
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	if (npolled)
		mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	/* -EAGAIN just means the CQ was empty; report how many we polled. */
	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->priv_uar.map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}
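
/*
 * IB_CQ_SOLICITED arms the CQ to fire only on the next solicited
 * completion or completion error; IB_CQ_NEXT_COMP (the other value
 * covered by IB_CQ_SOLICITED_MASK) requests an event for any next
 * completion.  mlx4_cq_arm() writes the request to the arm doorbell
 * record and the UAR page, serialized by the per-device doorbell lock.
 */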
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int is_xrc_srq = 0;

	if (srq && srq->ibsrq.xrc_cq)
		is_xrc_srq = 1;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if (((be32_to_cpu(cqe->vlan_my_qpn) & 0xffffff) == qpn) ||
		    (is_xrc_srq &&
		     (be32_to_cpu(cqe->g_mlpath_rqpn) & 0xffffff) ==
		      srq->msrq.srqn)) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}