/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
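
/*
 * destroy_cq: post a FW_RI_RES_WR with opcode RESET on the adapter's
 * management queue, wait for the firmware to reply, then free the CQ
 * queue memory and return the cqid to its context.
 */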
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
    struct c4iw_dev_ucontext *uctx)
	struct adapter *sc = rdev->adap;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	struct c4iw_wr_wait wr_wait;

	wr_len = sizeof *res_wr + sizeof *res;
	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_RES_WR) |
	    V_FW_RI_RES_WR_NRES(1) |
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

	contigfree(cq->queue, cq->memsize, M_DEVBUF);
	c4iw_put_cqid(rdev, cq->cqid, uctx);
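
/*
 * create_cq: allocate a cqid and the software/DMA queue memory, then post
 * a FW_RI_RES_WR with opcode WRITE describing the ingress queue to the
 * firmware.  The kernel GTS doorbell address and the user GTS page
 * (cq->ugts) are derived from the adapter's register and user-doorbell
 * BARs.
 */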
create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
    struct c4iw_dev_ucontext *uctx)
	struct adapter *sc = rdev->adap;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
	cq->queue = contigmalloc(cq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
	cq->dma_addr = vtophys(cq->queue);
	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;
	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_RES_WR) |
	    V_FW_RI_RES_WR_NRES(1) |
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	/* Fixme: Always use first queue id for IQANDSTINDEX. Linux does the same. */
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
	    V_FW_RI_RES_WR_IQANUS(0) |
	    V_FW_RI_RES_WR_IQANUD(1) |
	    F_FW_RI_RES_WR_IQANDST |
	    V_FW_RI_RES_WR_IQANDSTINDEX(sc->sge.ofld_rxq[0].iq.abs_id));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
	    F_FW_RI_RES_WR_IQDROPRSS |
	    V_FW_RI_RES_WR_IQPCIECH(2) |
	    V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
	    V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	CTR2(KTR_IW_CXGBE, "%s wait_event wr_wait %p", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

	cq->gts = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
	cq->ugts = (u64)((char*)rman_get_virtual(sc->udbs_res) +
	    (cq->cqid << rdev->cqshift));
	cq->ugts &= PAGE_MASK;
	    "%s: UGTS %p cqid %x cqshift %d page_mask %x", __func__,
	    cq->ugts, cq->cqid, rdev->cqshift, PAGE_MASK);

	contigfree(cq->queue, cq->memsize, M_DEVBUF);
	c4iw_put_cqid(rdev, cq->cqid, uctx);
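
/*
 * Build a software CQE with status T4_ERR_SWFLUSH for one RQ entry and
 * stash it in the CQ's software queue.
 */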
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
	    cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
	    V_CQE_OPCODE(FW_RI_SEND) |
	    V_CQE_QPID(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
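
/*
 * Insert flush CQEs for the RQ entries still in use, skipping the first
 * 'count' entries the caller has already accounted for.
 */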
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
	int in_use = wq->rq.in_use - count;

	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p rq.in_use %u skip count %u",
	    __func__, wq, cq, wq->rq.in_use, count);
	insert_recv_cqe(wq, cq);
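
/*
 * Build a software flush CQE (status T4_ERR_SWFLUSH) for one SQ work
 * request and stash it in the CQ's software queue.
 */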
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
    struct t4_swsqe *swcqe)
	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
	    cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
	    V_CQE_OPCODE(swcqe->opcode) |
	    V_CQE_QPID(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
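
/*
 * Insert flush CQEs for the SQ entries still in use beyond the first
 * 'count' entries, wrapping around the software SQ as needed.
 */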
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
	struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
	int in_use = wq->sq.in_use - count;

	insert_sq_cqe(wq, cq, swsqe);
	if (swsqe == (wq->sq.sw_sq + wq->sq.size))
		swsqe = wq->sq.sw_sq;

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void c4iw_flush_hw_cq(struct t4_cq *cq)
	struct t4_cqe *cqe = NULL, *swcqe;

	CTR3(KTR_IW_CXGBE, "%s cq %p cqid 0x%x", __func__, cq, cq->cqid);
	ret = t4_next_hw_cqe(cq, &cqe);
		CTR3(KTR_IW_CXGBE, "%s flushing hwcq cidx 0x%x swcq pidx 0x%x",
		    __func__, cq->cidx, cq->sw_pidx);
		swcqe = &cq->sw_queue[cq->sw_pidx];
		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
		ret = t4_next_hw_cqe(cq, &cqe);
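
/*
 * Tell whether a CQE corresponds to a work request the consumer will see:
 * TERMINATE CQEs, RQ-type WRITEs, SQ-type READ_RESPs and SENDs targeting
 * an empty RQ are filtered out.
 */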
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
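
/*
 * Count the software CQEs that belong to this WQ's SQ (including read
 * responses while a read is outstanding).
 */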
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
		    wq->sq.oldest_read)) &&
		    (CQE_QPID(cqe) == wq->sq.qid))
		if (++ptr == cq->size)
	CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
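
/*
 * Count the software CQEs that complete RQ work requests for this WQ.
 */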
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
	CTR2(KTR_IW_CXGBE, "%s count zero %d", __func__, *count);
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
		if (++ptr == cq->size)
	CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
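
/*
 * Walk the software SQ from cidx, skipping unsignaled WRs, and move any
 * CQE that has already completed into the software CQ so completions are
 * delivered in order.
 */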
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
	struct t4_swsqe *swsqe;
	u16 ptr = wq->sq.cidx;
	int count = wq->sq.in_use;

	swsqe = &wq->sq.sw_sq[ptr];
		if (!swsqe->signaled) {
			if (++ptr == wq->sq.size)
			swsqe = &wq->sq.sw_sq[ptr];
		} else if (swsqe->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			    "%s moving cqe into swcq sq idx %u cq idx %u",
			    __func__, ptr, cq->sw_pidx);
			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			wq->sq.in_use -= unsignaled;
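
/*
 * Build a synthetic SQ CQE for the oldest outstanding read request from
 * the RQ-type READ_RESP CQE the hardware produced (see the "tweak READ
 * completions" comment in poll_cq()).
 */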
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
    struct t4_cqe *read_cqe)
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
	    V_CQE_SWCQE(SW_CQE(hw_cqe)) |
	    V_CQE_OPCODE(FW_RI_READ_REQ) |
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;

/*
 * Return a ptr to the next read wr in the SWSQ or NULL.
 */
static void advance_oldest_read(struct t4_wq *wq)
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
		if (++rptr == wq->sq.size)
	wq->sq.oldest_read = NULL;

/*
 * Caller must:
 *	check the validity of the first CQE,
 *	supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * Return values:
 *	-EAGAIN		CQE skipped, try again.
 *	-EOVERFLOW	CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
    u8 *cqe_flushed, u64 *cookie, u32 *credit)
	struct t4_cqe *hw_cqe, read_cqe;

	ret = t4_next_cqe(cq, &hw_cqe);

	    "%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x", __func__,
	    CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe), CQE_GENBIT(hw_cqe),
	    CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe));
	    "%s opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x",
	    __func__, CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	    CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */

	/*
	 * Special cqe for drain WR completions...
	 */
	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
		*cookie = CQE_DRAIN_COOKIE(hw_cqe);

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup. So ignore the completion.
		 */
		if (!wq->sq.oldest_read) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		advance_oldest_read(wq);

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = t4_wq_in_error(wq);
		t4_set_wq_in_error(wq);

	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {

	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN. So we must validate that
		 * the MSN in the SEND is the next expected MSN. If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */
		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ. This handles:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		    "%s out of order completion going in sw_sq at idx %u",
		    __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
		CTR2(KTR_IW_CXGBE, "%s completing sq idx %u",
		    __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		CTR2(KTR_IW_CXGBE, "%s completing rq idx %u",
		    __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));

	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

	if (SW_CQE(hw_cqe)) {
		CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip sw cqe cidx %u",
		    __func__, cq, cq->cqid, cq->sw_cidx);
		CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip hw cqe cidx %u",
		    __func__, cq, cq->cqid, cq->cidx);

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Return values:
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe cqe = {0, 0}, *rd_cqe;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	spin_lock(&qhp->lock);

	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);

	wc->vendor_err = CQE_STATUS(&cqe);

	CTR5(KTR_IW_CXGBE, "%s qpid 0x%x type %d opcode %d status 0x%x",
	    __func__, CQE_QPID(&cqe), CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
	CTR5(KTR_IW_CXGBE, "%s len %u wrid hi 0x%x lo 0x%x cookie 0x%llx",
	    __func__, CQE_LEN(&cqe), CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
	    (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			wc->opcode = IB_WC_BIND_MW;
		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
		case C4IW_DRAIN_OPCODE:
			wc->opcode = IB_WC_SEND;
			printf("Unexpected opcode %d "
			    "in the CQE received for QPID = 0x%0x\n",
			    CQE_OPCODE(&cqe), CQE_QPID(&cqe));

		wc->status = IB_WC_WR_FLUSH_ERR;

	switch (CQE_STATUS(&cqe)) {
		wc->status = IB_WC_SUCCESS;
		wc->status = IB_WC_LOC_ACCESS_ERR;
		wc->status = IB_WC_LOC_PROT_ERR;
		wc->status = IB_WC_LOC_ACCESS_ERR;
		wc->status = IB_WC_GENERAL_ERR;
		wc->status = IB_WC_LOC_LEN_ERR;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		wc->status = IB_WC_MW_BIND_ERR;
	case T4_ERR_PDU_LEN_ERR:
	case T4_ERR_OUT_OF_RQE:
	case T4_ERR_DDP_VERSION:
	case T4_ERR_RDMA_VERSION:
	case T4_ERR_DDP_QUEUE_NUM:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
	case T4_ERR_INTERNAL_ERR:
		wc->status = IB_WC_FATAL_ERR;
		wc->status = IB_WC_WR_FLUSH_ERR;
		printf("Unexpected cqe_status 0x%x for QPID = 0x%0x\n",
		    CQE_STATUS(&cqe), CQE_QPID(&cqe));
		wc->status = IB_WC_FATAL_ERR;

	spin_unlock(&qhp->lock);
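
/*
 * ib_poll_cq handler: with the CQ lock held, poll up to num_entries
 * completions, retrying any poll that returns -EAGAIN.
 */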
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
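
/*
 * ib_destroy_cq handler: remove the CQ from the cqid table, wait for the
 * reference count to drain, then release the hardware CQ via destroy_cq().
 */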
int c4iw_destroy_cq(struct ib_cq *ib_cq)
	struct c4iw_ucontext *ucontext;

	CTR2(KTR_IW_CXGBE, "%s ib_cq %p", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
	destroy_cq(&chp->rhp->rdev, &chp->cq,
	    ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
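
/*
 * ib_create_cq handler: size the hardware IQ (status page, an extra
 * full-vs-empty entry, multiple-of-16 rounding, 2x head-room, page-sized
 * memsize for user CQs), create the hardware CQ, and for user contexts
 * hand back the mmap keys for the queue memory and the GTS page.
 */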
c4iw_create_cq(struct ib_device *ibdev, int entries, int vector,
    struct ib_ucontext *ib_context, struct ib_udata *udata)
	struct c4iw_dev *rhp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	CTR3(KTR_IW_CXGBE, "%s ib_dev %p entries %d", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	/* IQ needs one extra entry to differentiate full vs empty. */

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = entries * 2;

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	memsize = roundup(memsize, PAGE_SIZE);
	hwentries = memsize / sizeof *chp->cq.queue;
	while (hwentries > T4_MAX_IQ_SIZE) {
		memsize -= PAGE_SIZE;
		hwentries = memsize / sizeof *chp->cq.queue;
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

	mm = kmalloc(sizeof *mm, GFP_KERNEL);
	mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);

	memset(&uresp, 0, sizeof(uresp));
	uresp.qid_mask = rhp->rdev.cqmask;
	uresp.cqid = chp->cq.cqid;
	uresp.size = chp->cq.size;
	uresp.memsize = chp->cq.memsize;
	spin_lock(&ucontext->mmap_lock);
	uresp.key = ucontext->key;
	ucontext->key += PAGE_SIZE;
	uresp.gts_key = ucontext->key;
	ucontext->key += PAGE_SIZE;
	spin_unlock(&ucontext->mmap_lock);
	ret = ib_copy_to_udata(udata, &uresp,
	    sizeof(uresp) - sizeof(uresp.reserved));

	mm->addr = vtophys(chp->cq.queue);
	mm->len = chp->cq.memsize;
	insert_mmap(ucontext, mm);

	mm2->key = uresp.gts_key;
	mm2->addr = chp->cq.ugts;
	mm2->len = PAGE_SIZE;
	insert_mmap(ucontext, mm2);

	    "%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx",
	    __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
	    (unsigned long long) chp->cq.dma_addr);

	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
	destroy_cq(&chp->rhp->rdev, &chp->cq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
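
/*
 * ib_req_notify_cq handler: arm the CQ for the next solicited (or any)
 * completion; the return value reports missed events only if the consumer
 * asked for IB_CQ_REPORT_MISSED_EVENTS.
 */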
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	ret = t4_arm_cq(&chp->cq,
	    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))