/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
struct cpl_set_tcb_rpl;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#include "iw_cxgbe.h"
#include "user.h"

static int creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize);
static int max_fr_immd = T4_MAX_FR_IMMD; /* SYSCTL parameter later... */

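/*
 * IRD (incoming RDMA read depth) is a shared per-device resource.
 * alloc_ird()/free_ird() below account for it under dev->lock so that the
 * sum of all QPs' max_ird never exceeds what the adapter can buffer.
 */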
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int ret = 0;

	spin_lock_irq(&dev->lock);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	else
		ret = -ENOMEM;
	spin_unlock_irq(&dev->lock);

	if (ret)
		log(LOG_WARNING, "%s: device IRD resources exhausted\n",
		    device_get_nameunit(dev->rdev.adap->dev));

	return ret;
}

static void free_ird(struct c4iw_dev *dev, int ird)
{
	spin_lock_irq(&dev->lock);
	dev->avail_ird += ird;
	spin_unlock_irq(&dev->lock);
}

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);

	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(rhp->ibdev.dma_device,
	    wq->rq.memsize, wq->rq.queue,
	    dma_unmap_addr(&wq->rq, mapping));
	dma_free_coherent(rhp->ibdev.dma_device,
	    wq->sq.memsize, wq->sq.queue,
	    dma_unmap_addr(&wq->sq, mapping));
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);

	return 0;
}

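/*
 * create_qp() below allocates host memory for the SQ and RQ and then
 * describes both rings to the firmware with a single FW_RI_RES_WR carrying
 * two fw_ri_res entries (FW_RI_RES_TYPE_SQ and FW_RI_RES_TYPE_RQ).  The
 * firmware reply is awaited through the wr_wait cookie embedded in the
 * work request.
 */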
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	int ret = 0;
	int eqsize;
	struct wrqe *wr;
	u64 sq_bar2_qoffset = 0, rq_bar2_qoffset = 0;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
		    GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
		    GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2 and at least 16 deep.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	/* QP memory, allocate DMAable memory for Send & Receive Queues */
	wq->sq.queue = dma_alloc_coherent(rhp->ibdev.dma_device, wq->sq.memsize,
	    &(wq->sq.dma_addr), GFP_KERNEL);
	if (!wq->sq.queue) {
		ret = -ENOMEM;
		goto free_hwaddr;
	}
	wq->sq.phys_addr = vtophys(wq->sq.queue);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
	memset(wq->sq.queue, 0, wq->sq.memsize);

	wq->rq.queue = dma_alloc_coherent(rhp->ibdev.dma_device,
	    wq->rq.memsize, &(wq->rq.dma_addr), GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq_dma;
	}
	wq->rq.phys_addr = vtophys(wq->rq.queue);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
	memset(wq->rq.queue, 0, wq->rq.memsize);

	CTR5(KTR_IW_CXGBE,
	    "%s QP sq base va %p pa 0x%llx rq base va %p pa 0x%llx",
	    __func__,
	    wq->sq.queue, (unsigned long long)wq->sq.phys_addr,
	    wq->rq.queue, (unsigned long long)wq->rq.phys_addr);

	/* Doorbell/WC regions, determine the BAR2 queue offset and qid. */
	t4_bar2_sge_qregs(rdev->adap, wq->sq.qid, T4_BAR2_QTYPE_EGRESS, user,
	    &sq_bar2_qoffset, &wq->sq.bar2_qid);
	t4_bar2_sge_qregs(rdev->adap, wq->rq.qid, T4_BAR2_QTYPE_EGRESS, user,
	    &rq_bar2_qoffset, &wq->rq.bar2_qid);

	if (user) {
		/*
		 * Compute BAR2 DB/WC physical address (page-aligned) for
		 * userspace mapping.
		 */
		wq->sq.bar2_pa = (rdev->bar2_pa + sq_bar2_qoffset) & PAGE_MASK;
		wq->rq.bar2_pa = (rdev->bar2_pa + rq_bar2_qoffset) & PAGE_MASK;
		CTR3(KTR_IW_CXGBE,
		    "%s BAR2 DB/WC sq base pa 0x%llx rq base pa 0x%llx",
		    __func__, (unsigned long long)wq->sq.bar2_pa,
		    (unsigned long long)wq->rq.bar2_pa);
	} else {
		/* Compute BAR2 DB/WC virtual address to access in kernel. */
		wq->sq.bar2_va = (void __iomem *)((u64)rdev->bar2_kva +
		    sq_bar2_qoffset);
		wq->rq.bar2_va = (void __iomem *)((u64)rdev->bar2_kva +
		    rq_bar2_qoffset);
		CTR3(KTR_IW_CXGBE, "%s BAR2 DB/WC sq base va %p rq base va %p",
		    __func__, wq->sq.bar2_va, wq->rq.bar2_va);
	}

	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
	if (wr == NULL) {
		ret = -ENOMEM;
		goto free_rq_dma;
	}
	res_wr = wrtod(wr);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_RES_WR) |
	    V_FW_RI_RES_WR_NRES(2) |
	    F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long)&wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
	    rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
	    V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
	    V_FW_RI_RES_WR_CPRIO(0) |		/* don't keep in chip cache */
	    V_FW_RI_RES_WR_PCIECHN(0) |		/* set by uP at ri_init time */
	    V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
	    V_FW_RI_RES_WR_DCAEN(0) |
	    V_FW_RI_RES_WR_DCACPU(0) |
	    V_FW_RI_RES_WR_FBMIN(2) |
	    V_FW_RI_RES_WR_FBMAX(3) |
	    V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
	    V_FW_RI_RES_WR_CIDXFTHRESH(0) |
	    V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
	    rdev->hw_queue.t4_eq_status_entries;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
	    V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
	    V_FW_RI_RES_WR_CPRIO(0) |		/* don't keep in chip cache */
	    V_FW_RI_RES_WR_PCIECHN(0) |		/* set by uP at ri_init time */
	    V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
	    V_FW_RI_RES_WR_DCAEN(0) |
	    V_FW_RI_RES_WR_DCACPU(0) |
	    V_FW_RI_RES_WR_FBMIN(2) |
	    V_FW_RI_RES_WR_FBMAX(3) |
	    V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
	    V_FW_RI_RES_WR_CIDXFTHRESH(0) |
	    V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	t4_wrq_tx(sc, wr);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid,
	    NULL, __func__);
	if (ret)
		goto free_rq_dma;

	CTR5(KTR_IW_CXGBE,
	    "%s sqid 0x%x rqid 0x%x sq bar2 va %p rq bar2 va %p",
	    __func__, wq->sq.qid, wq->rq.qid,
	    wq->sq.bar2_va, wq->rq.bar2_va);

	return 0;
free_rq_dma:
	dma_free_coherent(rhp->ibdev.dma_device,
	    wq->rq.memsize, wq->rq.queue,
	    dma_unmap_addr(&wq->rq, mapping));
free_sq_dma:
	dma_free_coherent(rhp->ibdev.dma_device,
	    wq->sq.memsize, wq->sq.queue,
	    dma_unmap_addr(&wq->sq, mapping));
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}

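/*
 * The build_* helpers below lay out the data portion of a work request
 * directly in the SQ ring.  A payload may wrap past the end of the queue,
 * which is why build_immd() and build_memreg() reset their destination
 * pointer back to sq->queue on reaching &sq->queue[sq->size].
 */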
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}

static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

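/*
 * build_rdma_send()/build_rdma_write() below choose between the two
 * layouts: IB_SEND_INLINE payloads are copied into the WR via build_immd()
 * (bounded by T4_MAX_SEND_INLINE / T4_MAX_WRITE_INLINE); everything else is
 * described by reference with an ISGL, where each SGE becomes two
 * big-endian flits (lkey+length, then address) terminated by a zero flit.
 */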
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
			    T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			    plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
			    (__be64 *)&sq->queue[sq->size],
			    wqe->send.u.isgl_src,
			    wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			    wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

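/*
 * All builders report the WR size through *len16, the length in 16-byte
 * units that init_wr_hdr() and the FW_RI_*_WR headers expect: the fixed
 * header plus either the inlined immediate data or the ISGL that follows.
 */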
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.immd_data = 0;
	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
			    T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			    plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
			    (__be64 *)&sq->queue[sq->size],
			    wqe->write.u.isgl_src,
			    wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			    wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge && wr->sg_list[0].length) {
		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
		    >> 32));
		wqe->read.to_src_lo =
		    cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
		    >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
	    (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
	    &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
	    wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

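/*
 * QP teardown is reference counted: the final kref_put() queues
 * free_qp_work() on the rdev's workqueue so that the actual teardown
 * (which frees DMA memory and may sleep) never runs from the caller's
 * context.
 */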
static void free_qp_work(struct work_struct *work)
{
	struct c4iw_ucontext *ucontext;
	struct c4iw_qp *qhp;
	struct c4iw_dev *rhp;

	qhp = container_of(work, struct c4iw_qp, free_work);
	ucontext = qhp->ucontext;
	rhp = qhp->rhp;

	CTR3(KTR_IW_CXGBE, "%s qhp %p ucontext %p", __func__,
	    qhp, ucontext);
	destroy_qp(&rhp->rdev, &qhp->wq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	if (ucontext)
		c4iw_put_ucontext(ucontext);
	kfree(qhp);
}

static void queue_qp_free(struct kref *kref)
{
	struct c4iw_qp *qhp;

	qhp = container_of(kref, struct c4iw_qp, kref);
	CTR2(KTR_IW_CXGBE, "%s qhp %p", __func__, qhp);
	queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
	kref_get(&to_c4iw_qp(qp)->kref);
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
}

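/*
 * Drain WRs: once t4_wq_in_error() is set, c4iw_post_send() and
 * c4iw_post_receive() no longer hand work to the hardware; the two helpers
 * below complete each WR immediately with a T4_ERR_SWFLUSH software CQE
 * pushed into the CQ's software queue, so drain-style consumers still see
 * one completion per posted WR.
 */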
static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *schp;
	unsigned long flag;
	struct t4_cq *cq;

	schp = to_c4iw_cq(qhp->ibqp.send_cq);
	cq = &schp->cq;

	PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid);
	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
	    V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
	    V_CQE_TYPE(1) |
	    V_CQE_SWCQE(1) |
	    V_CQE_QPID(qhp->wq.sq.qid));

	spin_lock_irqsave(&schp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&schp->lock, flag);

	spin_lock_irqsave(&schp->comp_handler_lock, flag);
	(*schp->ibcq.comp_handler)(&schp->ibcq,
	    schp->ibcq.cq_context);
	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
}

static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *rchp;
	unsigned long flag;
	struct t4_cq *cq;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	cq = &rchp->cq;

	PDBG("%s drain rq id %u\n", __func__, qhp->wq.sq.qid);
	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
	    V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
	    V_CQE_TYPE(0) |
	    V_CQE_SWCQE(1) |
	    V_CQE_QPID(qhp->wq.sq.qid));

	spin_lock_irqsave(&rchp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&rchp->lock, flag);

	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
	    rchp->ibcq.cq_context);
	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}

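/*
 * build_tpte_memreg() writes the complete TPT entry for a small (<= 2
 * page) fast-register inside the WR itself; it is used when the adapter
 * advertises fr_nsmr_tpte_wr_support (see c4iw_post_send()).
 */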
static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
		struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16)
{
	__be64 *p = (__be64 *)fr->pbl;

	fr->r2 = cpu_to_be32(0);
	fr->stag = cpu_to_be32(mhp->ibmr.rkey);

	fr->tpte.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
	    V_FW_RI_TPTE_STAGKEY((mhp->ibmr.rkey & M_FW_RI_TPTE_STAGKEY)) |
	    V_FW_RI_TPTE_STAGSTATE(1) |
	    V_FW_RI_TPTE_STAGTYPE(FW_RI_STAG_NSMR) |
	    V_FW_RI_TPTE_PDID(mhp->attr.pdid));
	fr->tpte.locread_to_qpid = cpu_to_be32(
	    V_FW_RI_TPTE_PERM(c4iw_ib_to_tpt_access(wr->access)) |
	    V_FW_RI_TPTE_ADDRTYPE(FW_RI_VA_BASED_TO) |
	    V_FW_RI_TPTE_PS(ilog2(wr->mr->page_size) - 12));
	fr->tpte.nosnoop_pbladdr = cpu_to_be32(V_FW_RI_TPTE_PBLADDR(
	    PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr) >> 3));
	fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
	fr->tpte.len_hi = cpu_to_be32(0);
	fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
	fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);

	p[0] = cpu_to_be64((u64)mhp->mpl[0]);
	p[1] = cpu_to_be64((u64)mhp->mpl[1]);

	*len16 = DIV_ROUND_UP(sizeof(*fr), 16);
}

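/*
 * build_memreg() below passes the fast-register PBL either inline in the
 * WR (FW_RI_DATA_IMMD) or, when ULP_TX DSGLs are supported and the PBL
 * exceeds max_fr_immd bytes, by reference (FW_RI_DATA_DSGL) so the
 * hardware fetches the page list from host memory instead.
 */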
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
		struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
		bool dsgl_supported)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
	int rem;

	if (mhp->mpl_len > t4_max_fr_depth(use_dsgl && dsgl_supported))
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
	wqe->fr.stag = cpu_to_be32(wr->key);
	wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
	    0xffffffff);

	if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < mhp->mpl_len; i++)
			mhp->mpl[i] =
			    (__force u64)cpu_to_be64((u64)mhp->mpl[i]);

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		rem = pbllen;
		for (i = 0; i < mhp->mpl_len; i++) {
			*p = cpu_to_be64((u64)mhp->mpl[i]);
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		BUG_ON(rem < 0);
		while (rem) {
			*p = 0;
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
		    + pbllen, 16);
	}
	return 0;
}

int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
	      struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe = NULL;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;
	struct c4iw_rdev *rdev;

	qhp = to_c4iw_qp(ibqp);
	rdev = &qhp->rhp->rdev;
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		complete_sq_drain_wr(qhp, wr);
		return err;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		    qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
				c4iw_invalidate_mr(qhp->rhp,
				    wr->sg_list[0].lkey);
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			} else {
				fw_flags = 0;
			}
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_REG_MR: {
			struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);

			swsqe->opcode = FW_RI_FAST_REGISTER;
			if (rdev->adap->params.fr_nsmr_tpte_wr_support &&
			    !mhp->attr.state && mhp->mpl_len <= 2) {
				fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
				build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
				    mhp, &len16);
			} else {
				fw_opcode = FW_RI_FR_NSMR_WR;
				err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
				    mhp, &len16,
				    rdev->adap->params.ulptx_memwrite_dsgl);
				if (err)
					break;
			}
			mhp->attr.state = 1;
			break;
		}
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
			break;
		default:
			CTR2(KTR_IW_CXGBE, "%s post of type %d TBD!", __func__,
			    wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
		    qhp->sq_sig_all;
		swsqe->flushed = 0;
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		CTR5(KTR_IW_CXGBE,
		    "%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u",
		    __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		    swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
	}

	t4_ring_sq_db(&qhp->wq, idx, wqe, rdev->adap->iwt.wc_en);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

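/*
 * Usage sketch (not from this driver): a kernel ULP posts a signaled SEND
 * through the generic verbs layer, which dispatches to c4iw_post_send()
 * above.  dma_addr, len, lkey and cookie are placeholders; error handling
 * is elided.
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = lkey,
 *	};
 *	struct ib_send_wr swr = {
 *		.wr_id = cookie,
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list = &sge, .num_sge = 1,
 *	}, *bad;
 *	if (ib_post_send(qp, &swr, &bad))
 *		handle_error();
 */
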
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe = NULL;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		complete_rq_drain_wr(qhp, wr);
		return err;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
		    qhp->wq.rq.wq_pidx * T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		CTR3(KTR_IW_CXGBE, "%s cookie 0x%llx pidx %u", __func__,
		    (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}

	t4_ring_rq_db(&qhp->wq, idx, wqe, qhp->rhp->rdev.adap->iwt.wc_en);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
	    (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
	    (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

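/*
 * post_terminate() pushes an RDMAP TERMINATE message onto the offloaded
 * connection.  The layer/etype and ecode either come from the QP
 * attributes (MPA protocol errors) or are derived from the failing CQE by
 * build_term_codes() above.
 */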
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	int ret;
	struct fw_ri_wr *wqe;
	struct terminate_message *term;
	struct wrqe *wr;
	struct socket *so = qhp->ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
	    qhp->wq.sq.qid, qhp->ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return;
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR));
	wqe->flowid_len16 = cpu_to_be32(
	    V_FW_WR_FLOWID(qhp->ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		return;
	}
	t4_wrq_tx(qhp->rhp->rdev.adap, wr);
}

/* Assumes qhp lock is held. */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int rq_flushed, sq_flushed;
	unsigned long flag;

	CTR4(KTR_IW_CXGBE, "%s qhp %p rchp %p schp %p", __func__, qhp, rchp,
	    schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);

	if (qhp->wq.flushed) {
		spin_unlock(&qhp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
		return;
	}
	qhp->wq.flushed = 1;

	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	if (schp != rchp)
		c4iw_flush_hw_cq(schp);
	sq_flushed = c4iw_flush_sq(qhp);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);

	if (schp == rchp) {
		if (t4_clear_cq_armed(&rchp->cq) &&
		    (rq_flushed || sq_flushed)) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
			    rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
	} else {
		if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
			    rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
		if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
			    schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
	}
}

static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	schp = to_c4iw_cq(qhp->ibqp.send_cq);

	t4_set_wq_in_error(&qhp->wq);
	if (qhp->ibqp.uobject) {
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
			    schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

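/*
 * rdma_fini() posts an FW_RI_TYPE_FINI work request to take the connection
 * out of RDMA mode, reserving tx credits through creds() first, and blocks
 * on ep->com.wr_wait until the firmware acknowledges the transition.
 */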
static int
rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
{
	struct c4iw_rdev *rdev = &rhp->rdev;
	struct adapter *sc = rdev->adap;
	struct fw_ri_wr *wqe;
	int ret;
	struct wrqe *wr;
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	KASSERT(rhp == qhp->rhp && ep == qhp->ep, ("%s: EDOOFUS", __func__));

	CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp,
	    qhp->wq.sq.qid, ep, ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return (-ENOMEM);
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);

	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR) | F_FW_WR_COMPL);
	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long)&ep->com.wr_wait;
	wqe->u.fini.type = FW_RI_TYPE_FINI;

	c4iw_init_wr_wait(&ep->com.wr_wait);

	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		return ret;
	}
	t4_wrq_tx(sc, wr);

	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
	    qhp->wq.sq.qid, ep->com.so, __func__);
	return ret;
}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	CTR2(KTR_IW_CXGBE, "%s p2p_type = %d", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
		    sizeof(struct fw_ri_immd), 16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}

static int
creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
{
	struct ofld_tx_sdesc *txsd;

	CTR3(KTR_IW_CXGBE, "%s:creB %p %u", __func__, toep, wrsize);
	INP_WLOCK(inp);
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) != 0) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	txsd = &toep->txsd[toep->txsd_pidx];
	txsd->tx_credits = howmany(wrsize, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;
	INP_WUNLOCK(inp);
	CTR5(KTR_IW_CXGBE, "%s:creE %p %u %u %u", __func__, toep,
	    txsd->tx_credits, toep->tx_credits, toep->txsd_pidx);
	return (0);
}

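/*
 * rdma_init() moves the connection into RDMA mode with an FW_RI_TYPE_INIT
 * work request describing the SQ/RQ/CQ ids, MPA attributes and ORD/IRD
 * limits.  For an MPA initiator, build_rtr_msg() appends a zero-length RTR
 * (read or write, per the negotiated p2p type).
 */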
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct wrqe *wr;
	struct c4iw_ep *ep = qhp->ep;
	struct c4iw_rdev *rdev = &qhp->rhp->rdev;
	struct adapter *sc = rdev->adap;
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp,
	    qhp->wq.sq.qid, ep, ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return (-ENOMEM);
	wqe = wrtod(wr);
	ret = alloc_ird(rhp, qhp->attr.max_ird);
	if (ret) {
		qhp->attr.max_ird = 0;
		free_wrqe(wr);
		return ret;
	}

	memset(wqe, 0, sizeof *wqe);

	wqe->op_compl = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_WR) |
	    F_FW_WR_COMPL);
	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long)&ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
	    V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
	    V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
	    FW_RI_QP_RDMA_WRITE_ENABLE |
	    FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
		    FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
	    sc->vres.rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	c4iw_init_wr_wait(&ep->com.wr_wait);

	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		free_ird(rhp, qhp->attr.max_ird);
		return ret;
	}
	t4_wrq_tx(sc, wr);

	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
	    qhp->wq.sq.qid, ep->com.so, __func__);
	if (!ret) {
		toep->ulp_mode = ULP_MODE_RDMA;
		return 0;
	}
	free_ird(rhp, qhp->attr.max_ird);

	return ret;
}

int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	CTR5(KTR_IW_CXGBE, "%s qhp %p sqid 0x%x rqid 0x%x ep %p", __func__, qhp,
	    qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep);
	CTR3(KTR_IW_CXGBE, "%s state %d -> %d", __func__, qhp->attr.state,
	    (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > cur_max_read_depth(rhp)) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	/* Return EINPROGRESS if QP is already in transition state.
	 * Eg: CLOSING->IDLE transition or *->ERROR transition.
	 * This can happen while the connection is switching (due to
	 * rdma_fini) from iWARP/RDDP to TOE mode and any in-flight RDMA RX
	 * data will reach the TOE driver -> TCP stack -> iWARP driver.  In
	 * this way the iWARP driver keeps receiving in-flight RDMA RX data
	 * until the socket is closed or aborted.  And if the iWARP CM is in
	 * the FPDU state, it tries to put the QP in TERM state and
	 * disconnects the endpoint.  But as the QP is already in a
	 * transition state, this event is ignored.
	 */
	if ((qhp->attr.state >= C4IW_QP_STATE_ERROR) &&
	    (attrs->next_state == C4IW_QP_STATE_TERMINATE)) {
		ret = -EINPROGRESS;
		goto out;
	}

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			ep = qhp->ep;
			if (!internal) {
				c4iw_get_ep(&qhp->ep->com);
				terminate = 1;
				disconnect = 1;
			} else {
				terminate = qhp->attr.send_term;
				ret = rdma_fini(rhp, qhp, ep);
				if (ret)
					goto err;
			}
			break;
		case C4IW_QP_STATE_ERROR:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:

		/*
		 * Allow kernel users to move to ERROR for qp draining.
		 */
		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
		    C4IW_QP_STATE_ERROR)) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printf("%s in a bad state %d\n",
		    __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	CTR3(KTR_IW_CXGBE, "%s disassociating ep %p qpid 0x%x", __func__,
	    qhp->ep, qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	BUG_ON(!ep);
	flush_qp(qhp);
	wake_up(&qhp->wait);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		__c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
		    GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s exit state %d", __func__, qhp->attr.state);
	return ret;
}

int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;

	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ib_qp);
	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);

	free_ird(rhp, qhp->attr.max_ird);
	c4iw_qp_rem_ref(ib_qp);

	CTR3(KTR_IW_CXGBE, "%s ib_qp %p qpid 0x%0x", __func__, ib_qp,
	    qhp->wq.sq.qid);
	return 0;
}

struct ib_qp *
c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
	       struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	unsigned int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *sq_key_mm = NULL, *rq_key_mm = NULL;
	struct c4iw_mm_entry *sq_db_key_mm = NULL, *rq_db_key_mm = NULL;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
		return ERR_PTR(-E2BIG);
	rqsize = attrs->cap.max_recv_wr + 1;
	if (rqsize < 8)
		rqsize = 8;

	if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
		return ERR_PTR(-E2BIG);
	sqsize = attrs->cap.max_send_wr + 1;
	if (sqsize < 8)
		sqsize = 8;

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize =
	    (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
	    sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
	qhp->wq.sq.flush_cidx = -1;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize =
	    (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
	    sizeof(*qhp->wq.rq.queue);

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	CTR5(KTR_IW_CXGBE, "%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu",
	    __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *)attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 0;
	qhp->attr.max_ird = 0;
	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	kref_init(&qhp->kref);
	INIT_WORK(&qhp->free_work, free_qp_work);

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	if (ret)
		goto err2;

	if (udata) {
		sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
		if (!sq_key_mm) {
			ret = -ENOMEM;
			goto err3;
		}
		rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
		if (!rq_key_mm) {
			ret = -ENOMEM;
			goto err4;
		}
		sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
		if (!sq_db_key_mm) {
			ret = -ENOMEM;
			goto err5;
		}
		rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
		if (!rq_db_key_mm) {
			ret = -ENOMEM;
			goto err6;
		}
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.ma_sync_key = 0;
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err7;
		sq_key_mm->key = uresp.sq_key;
		sq_key_mm->addr = qhp->wq.sq.phys_addr;
		sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		CTR4(KTR_IW_CXGBE, "%s sq_key_mm %x, %x, %d", __func__,
		    sq_key_mm->key, sq_key_mm->addr,
		    sq_key_mm->len);
		insert_mmap(ucontext, sq_key_mm);
		rq_key_mm->key = uresp.rq_key;
		rq_key_mm->addr = qhp->wq.rq.phys_addr;
		rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		CTR4(KTR_IW_CXGBE, "%s rq_key_mm %x, %x, %d", __func__,
		    rq_key_mm->key, rq_key_mm->addr,
		    rq_key_mm->len);
		insert_mmap(ucontext, rq_key_mm);
		sq_db_key_mm->key = uresp.sq_db_gts_key;
		sq_db_key_mm->addr = (u64)qhp->wq.sq.bar2_pa;
		sq_db_key_mm->len = PAGE_SIZE;
		CTR4(KTR_IW_CXGBE, "%s sq_db_key_mm %x, %x, %d", __func__,
		    sq_db_key_mm->key, sq_db_key_mm->addr,
		    sq_db_key_mm->len);
		insert_mmap(ucontext, sq_db_key_mm);
		rq_db_key_mm->key = uresp.rq_db_gts_key;
		rq_db_key_mm->addr = (u64)qhp->wq.rq.bar2_pa;
		rq_db_key_mm->len = PAGE_SIZE;
		CTR4(KTR_IW_CXGBE, "%s rq_db_key_mm %x, %x, %d", __func__,
		    rq_db_key_mm->key, rq_db_key_mm->addr,
		    rq_db_key_mm->len);
		insert_mmap(ucontext, rq_db_key_mm);

		c4iw_get_ucontext(ucontext);
		qhp->ucontext = ucontext;
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));

	CTR5(KTR_IW_CXGBE, "%s sq id %u size %u memsize %zu num_entries %u",
	    __func__, qhp->wq.sq.qid,
	    qhp->wq.sq.size, qhp->wq.sq.memsize, attrs->cap.max_send_wr);
	CTR5(KTR_IW_CXGBE, "%s rq id %u size %u memsize %zu num_entries %u",
	    __func__, qhp->wq.rq.qid,
	    qhp->wq.rq.size, qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
	return &qhp->ibqp;
err7:
	kfree(rq_db_key_mm);
err6:
	kfree(sq_db_key_mm);
err5:
	kfree(rq_key_mm);
err4:
	kfree(sq_key_mm);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
	    IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
	    IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
	    (C4IW_QP_ATTR_ENABLE_RDMA_READ |
	     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
	     C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	CTR3(KTR_IW_CXGBE, "%s ib_dev %p qpn 0x%x", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return 0;
}