/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include "iw_cxgbe.h"
#include "user.h"
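
/*
 * Tear down a hardware CQ: post a FW_RI_RES_WR reset work request to the
 * firmware, then free the queue memory and release the cqid.
 */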
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;

	wr_len = sizeof *res_wr + sizeof *res;
	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
	if (wr == NULL)
		return 0;
	res_wr = wrtod(wr);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			V_FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);

	t4_wrq_tx(sc, wr);

	c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);

	kfree(cq->sw_queue);
	dma_free_coherent(rhp->ibdev.dma_device,
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return 0;
}
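
/*
 * Allocate a cqid and DMA memory for a new CQ, program the hardware queue
 * with a FW_RI_RES_WR write work request, and record the BAR2 doorbell
 * mapping for the queue.
 */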
static int
create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
	  struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct wrqe *wr;
	u64 cq_bar2_qoffset = 0;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}
	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(rhp->ibdev.dma_device, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;
	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
	if (wr == NULL) {
		ret = -ENOMEM;
		goto err4;
	}
	res_wr = wrtod(wr);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			V_FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	/* FIXME: Always use the first queue id for IQANDSTINDEX; Linux does the same. */
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(sc->sge.ofld_rxq[0].iq.abs_id));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	t4_wrq_tx(sc, wr);

	CTR2(KTR_IW_CXGBE, "%s wait_event wr_wait %p", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->rdev = rdev;

	/* Determine the BAR2 queue offset and qid. */
	t4_bar2_sge_qregs(rdev->adap, cq->cqid, T4_BAR2_QTYPE_INGRESS, user,
			&cq_bar2_qoffset, &cq->bar2_qid);

	/*
	 * If this is a user mapping, compute the page-aligned physical
	 * address for mmap; otherwise record the kernel virtual address.
	 */
	if (user)
		cq->bar2_pa = (rdev->bar2_pa + cq_bar2_qoffset) & PAGE_MASK;
	else
		cq->bar2_va = (void __iomem *)((u64)rdev->bar2_kva +
				cq_bar2_qoffset);

	return 0;
err4:
	dma_free_coherent(rhp->ibdev.dma_device, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
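
/*
 * Post a software-generated flush completion for one RQ entry into the
 * CQ's software queue.
 */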
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
	    cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(FW_RI_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}
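
/*
 * Insert flush completions for the RQ entries that hardware will never
 * complete, skipping the first 'count' entries already accounted for.
 */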
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p rq.in_use %u skip count %u",
	    __func__, wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
	    cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(swcqe->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

int c4iw_flush_sq(struct c4iw_qp *qhp)
{
	int flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	BUG_ON(idx >= wq->sq.size);
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		BUG_ON(swsqe->flushed);
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe) {
			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
			advance_oldest_read(wq);
		}
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
	return flushed;
}
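
/*
 * Walk the software SQ starting at flush_cidx and move any completions
 * that are now in order into the software CQ, stopping at the first
 * signaled WR that has not yet completed.
 */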
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	int cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;
	BUG_ON(cidx > wq->sq.size);

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			BUG_ON(swsqe->flushed);

			/*
			 * Insert this completed cqe into the swcq.
			 */
			CTR3(KTR_IW_CXGBE,
			    "%s moving cqe into swcq sq idx %u cq idx %u\n",
			    __func__, cidx, cq->sw_pidx);
			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}
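
/*
 * Build, in local memory, the SQ read-request completion that corresponds
 * to an RQ-type read-response CQE from hardware.
 */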
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
				 V_CQE_OPCODE(FW_RI_READ_REQ) |
				 V_CQE_TYPE(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}
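
/*
 * Advance oldest_read to the next outstanding read request in the software
 * SQ, or to NULL if no reads remain outstanding.
 */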
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order and/or completions that complete
 * prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s cq %p cqid 0x%x", __func__, &chp->cq,
	    chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/* drop CQEs with no associated QP */
		if (qhp == NULL)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {
			/*
			 * If we got here because of an async event or other
			 * error and there is an egress error, then drop it.
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads. */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/* Eat completions for unsignaled read WRs. */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/*
		 * If it's a SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
	}
}

static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;
	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;
	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;
	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}
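
/*
 * Count the RQ completions for this WQ that are currently queued in the
 * CQ's software queue.
 */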
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	CTR2(KTR_IW_CXGBE, "%s count zero %d", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
}

/*
 * poll_cq: the caller must check the validity of the first CQE and
 * supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *	0		CQE returned ok.
 *	-EAGAIN		CQE skipped, try again.
 *	-EOVERFLOW	CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	CTR6(KTR_IW_CXGBE,
	    "%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x", __func__,
	    CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe), CQE_GENBIT(hw_cqe),
	    CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe));
	CTR5(KTR_IW_CXGBE,
	    "%s opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x",
	    __func__, CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	    CQE_WRID_LOW(hw_cqe));

	/* skip cqe's not affiliated with a QP. */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/* skip hw cqe's if the wq is flushed. */
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/* skip TERMINATE cqes... */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/* Special cqe for drain WR completions... */
	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
		*cqe = *hw_cqe;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/*
		 * If we got here because of an async event or other error
		 * and there is an egress error, then drop it.
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup, so ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/* Eat completions for unsignaled read WRs. */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ; create a new read req CQE
		 * in local memory instead.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq);
	}

	/* RECV completion. */
	if (RQ_TYPE(hw_cqe)) {
		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */
		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		CTR2(KTR_IW_CXGBE,
		    "%s out of order completion going in sw_sq at idx %u",
		    __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);
		BUG_ON(idx >= wq->sq.size);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion.  In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one.  So adjust in_use based on this delta.
		 * If this is not completing any unsignaled wrs, then the
		 * delta will be 0.  Handle wrapping also!
		 */
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;
		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

		wq->sq.cidx = (uint16_t)idx;
		CTR2(KTR_IW_CXGBE, "%s completing sq idx %u\n",
		    __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		t4_sq_consume(wq);
	} else {
		CTR2(KTR_IW_CXGBE, "%s completing rq idx %u",
		    __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		t4_rq_consume(wq);
		goto skip_cqe;
	}

flush_wq:
	/* Flush any completed cqes that are now in-order. */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip sw cqe cidx %u",
		    __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip hw cqe cidx %u",
		    __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0		cqe returned
 *	-ENODATA	EMPTY;
 *	-EAGAIN		caller must try again
 *	any other -errno fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe cqe = {0, 0}, *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);
	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	CTR5(KTR_IW_CXGBE, "%s qpid 0x%x type %d opcode %d status 0x%x",
	    __func__, CQE_QPID(&cqe), CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
	    CQE_STATUS(&cqe));
	CTR5(KTR_IW_CXGBE, "%s len %u wrid hi 0x%x lo 0x%x cookie 0x%llx",
	    __func__, CQE_LEN(&cqe), CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
	    (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_REG_MR;
			/* Invalidate the MR if the fastreg failed */
			if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
				c4iw_invalidate_mr(qhp->rhp,
						   CQE_WRID_FR_STAG(&cqe));
			break;
		case C4IW_DRAIN_OPCODE:
			wc->opcode = IB_WC_SEND;
			break;
		default:
			printf("Unexpected opcode %d "
			       "in the CQE received for QPID = 0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printf("Unexpected cqe_status 0x%x for QPID = 0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IB_WC_FATAL_ERR;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}
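
/*
 * ib_poll_cq() handler: poll up to num_entries completions from the CQ,
 * serialized under the CQ lock.
 */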
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}
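
/*
 * ib_destroy_cq() handler: remove the CQ from the cqid table, wait for all
 * references to drop, then tear down the hardware queue.
 */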
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	CTR2(KTR_IW_CXGBE, "%s ib_cq %p", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
	kfree(chp);
	return 0;
}
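
/*
 * ib_create_cq() handler: size the hardware queue, program it via
 * create_cq(), and, for user CQs, return the mmap keys in the create
 * response.
 */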
struct ib_cq *
c4iw_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr,
	       struct ib_ucontext *ib_context, struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	CTR3(KTR_IW_CXGBE, "%s ib_dev %p entries %d", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be a multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make the actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext)
		memsize = roundup(memsize, PAGE_SIZE);
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		ret = -ENOMEM;
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err4;

		memset(&uresp, 0, sizeof(uresp));
		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = vtophys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.bar2_pa;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	CTR6(KTR_IW_CXGBE,
	    "%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx",
	    __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
	    (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}
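
/*
 * ib_req_notify_cq() handler: arm the CQ and, if requested, report whether
 * completions are already pending.
 */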
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret = 0;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	t4_arm_cq(&chp->cq,
		  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		ret = t4_cq_notempty(&chp->cq);
	spin_unlock_irqrestore(&chp->lock, flag);
	return ret;
}