1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/pciio.h>
39 #include <machine/bus.h>
40 #include <machine/resource.h>
41 #include <sys/bus_dma.h>
43 #include <sys/ioccom.h>
45 #include <sys/mutex.h>
46 #include <sys/rwlock.h>
47 #include <sys/linker.h>
48 #include <sys/firmware.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/syslog.h>
54 #include <sys/queue.h>
55 #include <sys/taskqueue.h>
57 #include <sys/queue.h>
58 #include <sys/libkern.h>
60 #include <netinet/in.h>
62 #include <contrib/rdma/ib_verbs.h>
63 #include <contrib/rdma/ib_umem.h>
64 #include <contrib/rdma/ib_user_verbs.h>
68 #include <cxgb_include.h>
69 #include <ulp/iw_cxgb/iw_cxgb_wr.h>
70 #include <ulp/iw_cxgb/iw_cxgb_hal.h>
71 #include <ulp/iw_cxgb/iw_cxgb_provider.h>
72 #include <ulp/iw_cxgb/iw_cxgb_cm.h>
73 #include <ulp/iw_cxgb/iw_cxgb.h>
74 #include <ulp/iw_cxgb/iw_cxgb_resource.h>
75 #include <ulp/iw_cxgb/iw_cxgb_user.h>
77 #include <dev/cxgb/cxgb_include.h>
78 #include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_wr.h>
79 #include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_hal.h>
80 #include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.h>
81 #include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_cm.h>
82 #include <dev/cxgb/ulp/iw_cxgb/iw_cxgb.h>
83 #include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_resource.h>
84 #include <dev/cxgb/ulp/iw_cxgb/iw_cxgb_user.h>
/*
 * post_qp_event - deliver an affiliated asynchronous error event for a QP.
 *
 * Logs the offending CQE, builds an ib_event (attached to the CQ when the
 * event is IB_EVENT_CQ_ERR, otherwise to the QP), invokes the consumer's
 * registered event handler if one is set, and — when the QP is still in
 * RTS — drives it to TERMINATE via iwch_modify_qp() and posts a TERMINATE
 * message with iwch_post_terminate().
 *
 * NOTE(review): this chunk is truncated — the return-type line, the early
 * return taken when the QP is already in ERROR/TERMINATE state, and several
 * closing braces are not visible here; code below is preserved verbatim.
 */
88 post_qp_event(struct iwch_dev *rnicp, struct iwch_qp *qhp, struct iwch_cq *chp,
89 struct respQ_msg_t *rsp_msg,
90 enum ib_event_type ib_event,
93 struct ib_event event;
94 struct iwch_qp_attributes attrs;
/*
 * An AE after the QP has already left RTS (ERROR/TERMINATE) is only
 * traced; presumably the missing lines return early here — TODO confirm.
 */
96 if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
97 (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
98 CTR4(KTR_IW_CXGB, "%s AE received after RTS - "
99 "qp state %d qpid 0x%x status 0x%x", __FUNCTION__,
100 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
/* Record the full CQE contents of the async error in the system log. */
104 log(LOG_ERR, "%s - AE qpid 0x%x opcode %d status 0x%x "
105 "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
106 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
107 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
108 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
/*
 * Build the event: CQ errors are reported against the CQ, everything
 * else against the QP (an `else` line is missing from this extract).
 */
111 event.event = ib_event;
112 event.device = chp->ibcq.device;
113 if (ib_event == IB_EVENT_CQ_ERR)
114 event.element.cq = &chp->ibcq;
116 event.element.qp = &qhp->ibqp;
/* Dispatch to the consumer's async event handler, if registered. */
118 if (qhp->ibqp.event_handler)
119 (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
/*
 * A fatal AE on an RTS QP moves it to TERMINATE and emits a TERMINATE
 * message to the peer (iwch_modify_qp's remaining args not visible here).
 */
121 if (qhp->attr.state == IWCH_QP_STATE_RTS) {
122 attrs.next_state = IWCH_QP_STATE_TERMINATE;
123 iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
126 iwch_post_terminate(qhp, rsp_msg);
/*
 * iwch_ev_dispatch - classify and dispatch a T3 response-queue async event.
 *
 * Called with an mbuf whose data is a struct respQ_msg_t. Looks up the CQ
 * and QP affiliated with the event under rnicp->lock, takes references on
 * both, then maps the CQE opcode/status onto an ib_event_type which is
 * delivered through post_qp_event() and/or the CQ completion handler.
 * References are dropped on the way out.
 *
 * NOTE(review): this chunk is truncated — declarations of chp/qhp, the
 * NULL-lookup early return, `break`/`return` statements between cases,
 * refcount increments, and the closing brace are not visible; the code
 * below is preserved verbatim.
 */
131 iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct mbuf *m)
133 struct iwch_dev *rnicp;
/* The AE payload lives at the start of the mbuf's data area. */
134 struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) m->m_data;
137 u32 cqid = RSPQ_CQID(rsp_msg);
/* Resolve the iWARP device from the low-level rdev's ULP back-pointer. */
139 rnicp = (struct iwch_dev *) rdev_p->ulp;
140 mtx_lock(&rnicp->lock);
141 chp = get_chp(rnicp, cqid);
142 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
/*
 * "BAD AE" path: presumably taken when the CQ or QP lookup fails —
 * log the orphaned event and drop the lock (early return not visible).
 */
144 log(LOG_ERR,"BAD AE cqid 0x%x qpid 0x%x opcode %d "
145 "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x \n",
146 cqid, CQE_QPID(rsp_msg->cqe),
147 CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
148 CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
149 CQE_WRID_LOW(rsp_msg->cqe));
150 mtx_unlock(&rnicp->lock);
/*
 * Pin the QP and CQ (the chp refcount bump under chp->lock is among the
 * missing lines — TODO confirm) before releasing the device lock.
 */
153 iwch_qp_add_ref(&qhp->ibqp);
154 mtx_lock(&chp->lock);
156 mtx_unlock(&chp->lock);
157 mtx_unlock(&rnicp->lock);
/*
 * A clean (status 0) TERMINATE CQE arises from either:
 */
160 * 1) completion of our sending a TERMINATE.
161 * 2) incoming TERMINATE message.
/*
 * SQ-type means our own TERMINATE send completed: just disconnect.
 * Otherwise (peer-initiated TERMINATE) post a REQ_ERR event first,
 * then disconnect the endpoint.
 */
163 if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
164 (CQE_STATUS(rsp_msg->cqe) == 0)) {
165 if (SQ_TYPE(rsp_msg->cqe)) {
166 CTR3(KTR_IW_CXGB, "%s QPID 0x%x ep %p disconnecting",
167 __FUNCTION__, qhp->wq.qpid, qhp->ep);
168 iwch_ep_disconnect(qhp->ep, 0, M_NOWAIT);
170 CTR2(KTR_IW_CXGB, "%s post REQ_ERR AE QPID 0x%x", __FUNCTION__,
172 post_qp_event(rnicp, qhp, chp, rsp_msg,
173 IB_EVENT_QP_REQ_ERR, 0);
174 iwch_ep_disconnect(qhp->ep, 0, M_NOWAIT);
/* Bad incoming Read request */
179 /* Bad incoming Read request */
180 if (SQ_TYPE(rsp_msg->cqe) &&
181 (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
182 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
186 /* Bad incoming write */
187 if (RQ_TYPE(rsp_msg->cqe) &&
188 (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
189 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
/*
 * Map the remaining hardware TPT status codes onto IB event classes.
 * `break` statements between the case groups are not visible in this
 * extract but are implied by the case structure.
 */
193 switch (CQE_STATUS(rsp_msg->cqe)) {
195 /* Completion Events */
196 case TPT_ERR_SUCCESS:
199 * Confirm the destination entry if this is a RECV completion.
/* (Despite the comment above, the visible test is SQ_TYPE — TODO confirm.) */
201 if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
202 dst_confirm(qhp->ep->dst);
204 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
/* Access errors: log, complete the CQ, and post QP_ACCESS_ERR. */
213 case TPT_ERR_INVALIDATE_SHARED_MR:
214 case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
215 log(LOG_ERR, "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
216 "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
217 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
218 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
219 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
220 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
221 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
224 /* Device Fatal Errors */
226 case TPT_ERR_ECC_PSTAG:
227 case TPT_ERR_INTERNAL_ERR:
228 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
231 /* QP Fatal Errors */
232 case TPT_ERR_OUT_OF_RQE:
233 case TPT_ERR_PBL_ADDR_BOUND:
236 case TPT_ERR_PDU_LEN_ERR:
237 case TPT_ERR_DDP_VERSION:
238 case TPT_ERR_RDMA_VERSION:
240 case TPT_ERR_DDP_QUEUE_NUM:
244 case TPT_ERR_MSN_GAP:
245 case TPT_ERR_MSN_RANGE:
246 case TPT_ERR_RQE_ADDR_BOUND:
247 case TPT_ERR_IRD_OVERFLOW:
248 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
/* Unrecognized status: log it and treat it as a QP-fatal error. */
252 log(LOG_ERR,"Unknown T3 status 0x%x QPID 0x%x\n",
253 CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
254 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
/*
 * Exit path: drop the CQ reference taken above (the action performed
 * when the refcount hits zero is among the missing lines — presumably
 * a wakeup for a waiter — TODO confirm), then release the QP reference.
 */
258 mtx_lock(&chp->lock);
259 if (--chp->refcnt == 0)
261 mtx_unlock(&chp->lock);
262 iwch_qp_rem_ref(&qhp->ibqp);