/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#include "iw_cxgbe.h"
#include "user.h"
static int creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize);

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	contigfree(sq->queue, sq->memsize, M_DEVBUF);
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dealloc_host_sq(rdev, sq);
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = contigmalloc(sq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
	    4096, 0);
	if (sq->queue == NULL)
		return (-ENOMEM);

	sq->dma_addr = vtophys(sq->queue);
	sq->phys_addr = vtophys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	CTR4(KTR_IW_CXGBE, "%s sq %p dma_addr %p phys_addr %p", __func__,
	    sq->queue, sq->dma_addr, sq->phys_addr);
	return (0);
}

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
    struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF);
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return (0);
}
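
/*
 * Allocate host memory and hardware resources for one QP: SQ/RQ qids,
 * the software shadow descriptor arrays (kernel QPs only), an RQT
 * allocation, and the queues themselves, then describe both egress
 * queues to the firmware in a single FW_RI_RES_WR carrying two
 * fw_ri_res records (one SQ, one RQ).
 */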
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
    struct t4_cq *rcq, struct t4_cq *scq,
    struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;
	int ret;
	int eqsize;
	const int spg_ndesc = sc->params.sge.spg_len / EQ_ESIZE;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return (-ENOMEM);

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
		    GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
		    GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/* RQT must be a power of 2. */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	if (alloc_host_sq(rdev, &wq->sq))
		goto err5;

	memset(wq->sq.queue, 0, wq->sq.memsize);
	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = contigmalloc(wq->rq.memsize,
	    M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0);
	if (wq->rq.queue)
		wq->rq.dma_addr = vtophys(wq->rq.queue);
	else
		goto err6;
	CTR5(KTR_IW_CXGBE,
	    "%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx", __func__,
	    wq->sq.queue, (unsigned long long)vtophys(wq->sq.queue),
	    wq->rq.queue, (unsigned long long)vtophys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
	    sc->sge_kdoorbell_reg);
	wq->gts = (void *)((unsigned long)rman_get_virtual(rdev->adap->regs_res)
	    + sc->sge_gts_reg);
	if (user) {
		wq->sq.udb = (u64)((char *)rman_get_virtual(rdev->adap->udbs_res) +
		    (wq->sq.qid << rdev->qpshift));
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)((char *)rman_get_virtual(rdev->adap->udbs_res) +
		    (wq->rq.qid << rdev->qpshift));
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
	if (wr == NULL)
		goto err7;
	res_wr = wrtod(wr);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_RES_WR) |
	    V_FW_RI_RES_WR_NRES(2) |
	    F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + spg_ndesc;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
	    V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
	    V_FW_RI_RES_WR_CPRIO(0) |		/* don't keep in chip cache */
	    V_FW_RI_RES_WR_PCIECHN(0) |		/* set by uP at ri_init time */
	    V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
	    V_FW_RI_RES_WR_DCAEN(0) |
	    V_FW_RI_RES_WR_DCACPU(0) |
	    V_FW_RI_RES_WR_FBMIN(2) |
	    V_FW_RI_RES_WR_FBMAX(2) |
	    V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
	    V_FW_RI_RES_WR_CIDXFTHRESH(0) |
	    V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + spg_ndesc;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
	    V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
	    V_FW_RI_RES_WR_CPRIO(0) |		/* don't keep in chip cache */
	    V_FW_RI_RES_WR_PCIECHN(0) |		/* set by uP at ri_init time */
	    V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
	    V_FW_RI_RES_WR_DCAEN(0) |
	    V_FW_RI_RES_WR_DCACPU(0) |
	    V_FW_RI_RES_WR_FBMIN(2) |
	    V_FW_RI_RES_WR_FBMAX(2) |
	    V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
	    V_FW_RI_RES_WR_CIDXFTHRESH(0) |
	    V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	t4_wrq_tx(sc, wr);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto err7;

	CTR6(KTR_IW_CXGBE,
	    "%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx",
	    __func__, wq->sq.qid, wq->rq.qid, wq->db,
	    (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return (0);
err7:
	contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF);
err6:
	dealloc_sq(rdev, &wq->sq);
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return (-ENOMEM);
}
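
/*
 * Copy the WR's SGL payload directly into the WQE as immediate data,
 * wrapping at the end of the SQ, then pad to a 16B boundary.  Fails
 * with -EMSGSIZE if the total payload exceeds 'max'.
 */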
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
    struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return (-EMSGSIZE);
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return (0);
}
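
/*
 * Build a firmware ISGL (lkey/length plus address flits) into the
 * WQE, wrapping from queue_end back to queue_start as needed.  When
 * plenp is non-NULL it returns the total payload length.
 */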
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
    struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
    int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return (-EMSGSIZE);
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
		    sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return (0);
}
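
/*
 * Fill in a FW_RI_SEND_WR work request.  The payload goes either
 * inline as immediate data or as an ISGL; len16 returns the WQE size
 * in 16B units for the producer.
 */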
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return (-EINVAL);
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return (-EINVAL);
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
			    T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return (ret);
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			    plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
			    (__be64 *)&sq->queue[sq->size],
			    wqe->send.u.isgl_src,
			    wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return (ret);
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			    wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return (0);
}
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return (-EINVAL);
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
			    T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return (ret);
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			    plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
			    (__be64 *)&sq->queue[sq->size],
			    wqe->write.u.isgl_src,
			    wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return (ret);
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			    wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return (0);
}
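
/*
 * Fill in a FW_RI_RDMA_READ_WR.  A read posted with no SGE is encoded
 * as a zero-length read with placeholder STAGs.
 */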
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return (-EINVAL);
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
		    >> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
		    >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return (0);
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
    struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
	    (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
	    &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return (ret);
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
	    wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return (0);
}
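
/*
 * Build a FW_RI_FR_NSMR_WR fast-register WQE: TPT attributes followed
 * by the PBL as immediate data, wrapping at the end of the SQ.
 */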
static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
    struct ib_send_wr *wr, u8 *len16)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return (-EINVAL);

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
	    0xffffffff);
	WARN_ON(pbllen > T4_MAX_FR_IMMD);
	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
	imdp->op = FW_RI_DATA_IMMD;
	imdp->r1 = 0;
	imdp->r2 = 0;
	imdp->immdlen = cpu_to_be32(pbllen);
	p = (__be64 *)(imdp + 1);
	rem = pbllen;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	BUG_ON(rem < 0);
	while (rem) {
		*p = 0;
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
	return (0);
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
    u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return (0);
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}
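
/*
 * When the QP is in error, posted WRs are completed locally instead
 * of being handed to hardware: a software CQE with status
 * T4_ERR_SWFLUSH and the drain opcode is inserted into the SW CQ and
 * the CQ's completion handler is invoked.
 */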
static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *schp;
	unsigned long flag;
	struct t4_cq *cq;

	schp = to_c4iw_cq(qhp->ibqp.send_cq);
	cq = &schp->cq;

	PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid);
	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
	    V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
	    V_CQE_TYPE(1) |
	    V_CQE_SWCQE(1) |
	    V_CQE_QPID(qhp->wq.sq.qid));

	spin_lock_irqsave(&schp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&schp->lock, flag);

	spin_lock_irqsave(&schp->comp_handler_lock, flag);
	(*schp->ibcq.comp_handler)(&schp->ibcq,
	    schp->ibcq.cq_context);
	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
}

static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *rchp;
	unsigned long flag;
	struct t4_cq *cq;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	cq = &rchp->cq;

	PDBG("%s drain rq id %u\n", __func__, qhp->wq.sq.qid);
	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
	    V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
	    V_CQE_TYPE(0) |
	    V_CQE_SWCQE(1) |
	    V_CQE_QPID(qhp->wq.sq.qid));

	spin_lock_irqsave(&rchp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&rchp->lock, flag);

	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
	    rchp->ibcq.cq_context);
	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
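
/*
 * Post a chain of send WRs to the SQ.  Each WR is translated into a
 * firmware WQE (send/write/read/fastreg/inv-stag), producer state is
 * advanced under the QP lock, and the doorbell is rung once for the
 * whole chain.
 */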
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
    struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		complete_sq_drain_wr(qhp, wr);
		return (err);
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return (-ENOMEM);
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		    qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			CTR2(KTR_IW_CXGBE, "%s post of type =%d TBD!", __func__,
			    wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
		    qhp->sq_sig_all;
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		CTR5(KTR_IW_CXGBE,
		    "%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u",
		    __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		    swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}

	t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return (err);
}
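
/*
 * Post a chain of receive WRs to the RQ; mirrors c4iw_post_send but
 * builds FW_RI_RECV_WR WQEs and rings the RQ doorbell once at the end.
 */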
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
    struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		complete_rq_drain_wr(qhp, wr);
		return (err);
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return (-ENOMEM);
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
		    qhp->wq.rq.wq_pidx * T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		CTR3(KTR_IW_CXGBE, "%s cookie 0x%llx pidx %u", __func__,
		    (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}

	t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return (err);
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return (-ENOSYS);
}

static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
	    (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
	    (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}
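
/*
 * Send a FW_RI_WR TERMINATE message on the connection's offload tx
 * queue.  The layer/etype and ecode come from the QP attributes when
 * a TERMINATE was requested explicitly, otherwise they are derived
 * from the error CQE via build_term_codes().
 */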
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
    gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct terminate_message *term;
	struct wrqe *wr;
	struct socket *so = qhp->ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;
	int ret;

	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
	    qhp->wq.sq.qid, qhp->ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return;
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR));
	wqe->flowid_len16 = cpu_to_be32(
	    V_FW_WR_FLOWID(qhp->ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		return;
	}
	t4_wrq_tx(qhp->rhp->rdev.adap, wr);
}

/* Assumes qhp lock is held. */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
    struct c4iw_cq *schp)
{
	int count;
	int flushed;
	unsigned long flag;

	CTR4(KTR_IW_CXGBE, "%s qhp %p rchp %p schp %p", __func__, qhp, rchp,
	    schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
	if (flushed && rchp->ibcq.comp_handler) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	}

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);
	if (flushed && schp->ibcq.comp_handler) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	}
}
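
/*
 * Mark the WQ/CQs in error and flush any pending work.  For user QPs
 * only the in-error flags are set and the CQ handlers are kicked;
 * kernel QPs go through the full __flush_qp() path above.
 */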
static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
			    schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}
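
/*
 * Post a FW_RI_TYPE_FINI work request and wait for the firmware
 * reply; this takes the connection out of RDMA mode.
 */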
static int
rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
{
	struct c4iw_rdev *rdev = &rhp->rdev;
	struct adapter *sc = rdev->adap;
	struct fw_ri_wr *wqe;
	int ret;
	struct wrqe *wr;
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	KASSERT(rhp == qhp->rhp && ep == qhp->ep, ("%s: EDOOFUS", __func__));

	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
	    qhp->wq.sq.qid, ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return (-ENOMEM);
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);

	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR) | F_FW_WR_COMPL);
	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long) &ep->com.wr_wait;
	wqe->u.fini.type = FW_RI_TYPE_FINI;

	c4iw_init_wr_wait(&ep->com.wr_wait);

	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		return (ret);
	}
	t4_wrq_tx(sc, wr);

	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
	    qhp->wq.sq.qid, __func__);
	return (ret);
}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	CTR2(KTR_IW_CXGBE, "%s p2p_type = %d", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
		    sizeof(struct fw_ri_immd), 16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}
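
/*
 * Reserve tx credits on the TOE connection for a work request of
 * 'wrsize' bytes, failing if the connection has been dropped.
 */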
static int
creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
{
	struct ofld_tx_sdesc *txsd;

	CTR3(KTR_IW_CXGBE, "%s:creB %p %u", __func__, toep, wrsize);
	INP_WLOCK(inp);
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) != 0) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	txsd = &toep->txsd[toep->txsd_pidx];
	txsd->tx_credits = howmany(wrsize, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;
	INP_WUNLOCK(inp);
	CTR5(KTR_IW_CXGBE, "%s:creE %p %u %u %u", __func__, toep,
	    txsd->tx_credits, toep->tx_credits, toep->txsd_pidx);
	return (0);
}
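
/*
 * Post a FW_RI_TYPE_INIT with the negotiated MPA attributes, QP
 * capabilities and queue ids, optionally embedding the RTR message
 * for the p2p initiator, then wait for the firmware reply.
 */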
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct wrqe *wr;
	struct c4iw_ep *ep = qhp->ep;
	struct c4iw_rdev *rdev = &qhp->rhp->rdev;
	struct adapter *sc = rdev->adap;
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
	    qhp->wq.sq.qid, ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return (-ENOMEM);
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);

	wqe->op_compl = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_WR) |
	    F_FW_WR_COMPL);
	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
	    V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
	    V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
	    FW_RI_QP_RDMA_WRITE_ENABLE |
	    FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
		    FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
	    sc->vres.rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	c4iw_init_wr_wait(&ep->com.wr_wait);

	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		return (ret);
	}
	t4_wrq_tx(sc, wr);

	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
	    qhp->wq.sq.qid, __func__);

	toep->ulp_mode = ULP_MODE_RDMA;
	return (ret);
}
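
/*
 * The QP state machine: IDLE/RTS/CLOSING/TERMINATE/ERROR transitions,
 * driven by both userland verbs and internal driver events.  The
 * 'internal' flag distinguishes the two and controls the disconnect,
 * terminate, and ep-dereference side effects performed after the
 * mutex is dropped.
 */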
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
    enum c4iw_qp_attr_mask mask,
    struct c4iw_qp_attributes *attrs,
    int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	CTR5(KTR_IW_CXGBE, "%s qhp %p sqid 0x%x rqid 0x%x ep %p", __func__, qhp,
	    qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep);
	CTR3(KTR_IW_CXGBE, "%s state %d -> %d", __func__, qhp->attr.state,
	    (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP. This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			if (!internal)
				terminate = 1;
			disconnect = 1;
			c4iw_get_ep(&qhp->ep->com);
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:

		/*
		 * Allow kernel users to move to ERROR for qp draining.
		 */
		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
		    C4IW_QP_STATE_ERROR)) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printf("%s in a bad state %d\n",
		    __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	CTR3(KTR_IW_CXGBE, "%s disassociating ep %p qpid 0x%x", __func__,
	    qhp->ep, qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	BUG_ON(!ep);
	flush_qp(qhp);
	wake_up(&qhp->wait);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP. This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
		    GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s exit state %d", __func__, qhp->attr.state);
	return (ret);
}

int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ib_qp);
	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	spin_lock_irq(&rhp->lock);
	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
	    to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	CTR3(KTR_IW_CXGBE, "%s ib_qp %p qpid 0x%0x", __func__, ib_qp,
	    qhp->wq.sq.qid);
	kfree(qhp);
	return (0);
}
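
/*
 * Verbs QP create: validate the requested sizes against hardware
 * limits, allocate the HW queues via create_qp(), and, for user QPs,
 * hand the queue and doorbell mappings to userspace as four mmap
 * keys in the udata response.
 */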
struct ib_qp *
c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
    struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret, spg_ndesc;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	spg_ndesc = rhp->rdev.adap->params.sge.spg_len / EQ_ESIZE;
	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE(spg_ndesc))
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE(spg_ndesc))
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + spg_ndesc) * sizeof *qhp->wq.sq.queue +
	    16 * sizeof(__be64);
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + spg_ndesc) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	CTR5(KTR_IW_CXGBE, "%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu",
	    __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	spin_lock_irq(&rhp->lock);
	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	if (ret)
		goto err2;

	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err7;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		CTR4(KTR_IW_CXGBE, "%s mm1 %x, %x, %d", __func__, mm1->key,
		    mm1->addr, mm1->len);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = vtophys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		CTR4(KTR_IW_CXGBE, "%s mm2 %x, %x, %d", __func__, mm2->key,
		    mm2->addr, mm2->len);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		CTR4(KTR_IW_CXGBE, "%s mm3 %x, %x, %d", __func__, mm3->key,
		    mm3->addr, mm3->len);
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		CTR4(KTR_IW_CXGBE, "%s mm4 %x, %x, %d", __func__, mm4->key,
		    mm4->addr, mm4->len);
		insert_mmap(ucontext, mm4);
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	CTR5(KTR_IW_CXGBE,
	    "%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x",
	    __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	    qhp->wq.sq.qid);
	return (&qhp->ibqp);
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
    int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return (0);

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
	    IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
	    IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
	    (C4IW_QP_ATTR_ENABLE_RDMA_READ |
	     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
	     C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	CTR3(KTR_IW_CXGBE, "%s ib_dev %p qpn 0x%x", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
    int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return (0);
}
#endif