1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
39 #include <sys/pciio.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
43 #include <sys/bus_dma.h>
45 #include <sys/ioccom.h>
47 #include <sys/mutex.h>
48 #include <sys/rwlock.h>
49 #include <sys/linker.h>
50 #include <sys/firmware.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
54 #include <sys/sysctl.h>
55 #include <sys/syslog.h>
56 #include <sys/queue.h>
57 #include <sys/taskqueue.h>
59 #include <sys/queue.h>
60 #include <sys/libkern.h>
62 #include <netinet/in.h>
64 #include <rdma/ib_verbs.h>
65 #include <rdma/ib_umem.h>
66 #include <rdma/ib_user_verbs.h>
67 #include <linux/idr.h>
68 #include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>
70 #include <cxgb_include.h>
71 #include <ulp/iw_cxgb/iw_cxgb_wr.h>
72 #include <ulp/iw_cxgb/iw_cxgb_hal.h>
73 #include <ulp/iw_cxgb/iw_cxgb_provider.h>
74 #include <ulp/iw_cxgb/iw_cxgb_cm.h>
75 #include <ulp/iw_cxgb/iw_cxgb.h>
76 #include <ulp/iw_cxgb/iw_cxgb_resource.h>
77 #include <ulp/iw_cxgb/iw_cxgb_user.h>
/*
 * post_qp_event - deliver an affiliated asynchronous error to the verbs
 * consumer.  Visible flow: under rnicp->lock, ignore the AE if the QP has
 * already left RTS (ERROR/TERMINATE); otherwise log it, drop the lock,
 * push an RTS QP toward TERMINATE via iwch_modify_qp()/iwch_post_terminate(),
 * then build an ib_event (CQ- or QP-affiliated) and invoke the consumer's
 * QP event handler and CQ completion handler.
 *
 * NOTE(review): this chunk is missing source lines (gaps in the embedded
 * original numbering), so some conditions, braces and return statements
 * are not visible here; comments describe only what the visible code shows.
 */
80 post_qp_event(struct iwch_dev *rnicp, struct iwch_qp *qhp, struct iwch_cq *chp,
81 struct respQ_msg_t *rsp_msg,
82 enum ib_event_type ib_event,
/* Event passed to the consumer's handlers, filled in below. */
85 struct ib_event event;
86 struct iwch_qp_attributes attrs;
/* rnicp->lock guards the QP state checks that follow. */
88 mtx_lock(&rnicp->lock);
/*
 * NOTE(review): the guarding condition for this branch is not visible in
 * this chunk; the trace/unlock pair suggests an early-out path for an
 * error with no usable QP association ("unaffiliated") — confirm upstream.
 */
91 CTR3(KTR_IW_CXGB, "%s unaffiliated error 0x%x qpid 0x%x\n",
92 __func__, CQE_STATUS(rsp_msg->cqe),
93 CQE_QPID(rsp_msg->cqe));
94 mtx_unlock(&rnicp->lock);
/*
 * QP already in ERROR or TERMINATE: the AE arrived after the QP left
 * RTS, so just trace it and drop the lock (return line not visible).
 */
98 if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
99 (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
100 CTR4(KTR_IW_CXGB, "%s AE received after RTS - "
101 "qp state %d qpid 0x%x status 0x%x", __FUNCTION__,
102 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
103 mtx_unlock(&rnicp->lock);
/* Log the AE details (opcode/status/type/wrid) before acting on it. */
107 log(LOG_ERR, "%s - AE qpid 0x%x opcode %d status 0x%x "
108 "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
109 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
110 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
111 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
/* Done with the device-level state; handlers below run unlocked. */
113 mtx_unlock(&rnicp->lock);
/*
 * An RTS QP takes a fatal AE: request a transition to TERMINATE and
 * post a TERMINATE message to the peer.  (Trailing arguments of the
 * iwch_modify_qp() call are not visible in this chunk.)
 */
115 if (qhp->attr.state == IWCH_QP_STATE_RTS) {
116 attrs.next_state = IWCH_QP_STATE_TERMINATE;
117 iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
120 iwch_post_terminate(qhp, rsp_msg);
/* Build the affiliated event: CQ element for CQ errors, QP otherwise. */
123 event.event = ib_event;
124 event.device = chp->ibcq.device;
125 if (ib_event == IB_EVENT_CQ_ERR)
126 event.element.cq = &chp->ibcq;
128 event.element.qp = &qhp->ibqp;
/* Consumer's async event handler is optional; comp_handler is not checked. */
130 if (qhp->ibqp.event_handler)
131 (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
/* Always notify the CQ so the consumer polls and sees the flushed WRs. */
133 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
/*
 * iwch_ev_dispatch - entry point for T3 async-event (AE) messages.
 * Visible flow: decode the respQ message from the mbuf, look up the CQ and
 * QP under rnicp->lock (logging a "BAD AE" when lookup fails), take a QP
 * reference and a CQ refcount, special-case clean TERMINATE completions
 * (driving iwch_ep_disconnect / a QP_REQ_ERR event), then map the CQE
 * status onto ib_event types via post_qp_event(); finally drop the CQ
 * refcount and QP reference.
 *
 * NOTE(review): this chunk is missing source lines (gaps in the embedded
 * original numbering): the lookup-failure `if`, the chp->refcnt increment,
 * several case labels, `goto`/`break`/`return` statements and the
 * refcnt-zero wakeup body are not visible.  Comments describe only what
 * the visible code shows.
 */
137 iwch_ev_dispatch(struct iwch_dev *rnicp, struct mbuf *m)
/* The AE arrives as a respQ message at the head of the mbuf data. */
139 struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) m->m_data;
142 u32 cqid = RSPQ_CQID(rsp_msg);
/* rnicp->lock protects the cqid/qpid lookup tables. */
144 mtx_lock(&rnicp->lock);
145 chp = get_chp(rnicp, cqid);
146 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
/*
 * NOTE(review): guarding `if` not visible — presumably entered when
 * get_chp()/get_qhp() returned NULL; log the orphaned AE and bail.
 */
148 log(LOG_ERR,"BAD AE cqid 0x%x qpid 0x%x opcode %d "
149 "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x \n",
150 cqid, CQE_QPID(rsp_msg->cqe),
151 CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
152 CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
153 CQE_WRID_LOW(rsp_msg->cqe));
154 mtx_unlock(&rnicp->lock);
/*
 * Pin the QP across the handlers below; chp->lock is taken here —
 * the chp->refcnt increment it protects is not visible in this chunk.
 */
157 iwch_qp_add_ref(&qhp->ibqp);
158 mtx_lock(&chp->lock);
160 mtx_unlock(&chp->lock);
161 mtx_unlock(&rnicp->lock);
/* A status-0 TERMINATE CQE arises from either of these two sources: */
164 * 1) completion of our sending a TERMINATE.
165 * 2) incoming TERMINATE message.
/*
 * SQ type => our own TERMINATE send completed: start disconnecting.
 * Otherwise (RQ) the peer terminated us: surface IB_EVENT_QP_REQ_ERR
 * and disconnect.  (The `else` line itself is not visible here.)
 */
167 if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
168 (CQE_STATUS(rsp_msg->cqe) == 0)) {
169 if (SQ_TYPE(rsp_msg->cqe)) {
170 CTR3(KTR_IW_CXGB, "%s QPID 0x%x ep %p disconnecting",
171 __FUNCTION__, qhp->wq.qpid, qhp->ep);
/* M_NOWAIT: we are in an event path and must not sleep on allocation. */
172 iwch_ep_disconnect(qhp->ep, 0, M_NOWAIT);
174 CTR2(KTR_IW_CXGB, "%s post REQ_ERR AE QPID 0x%x", __FUNCTION__,
176 post_qp_event(rnicp, qhp, chp, rsp_msg,
177 IB_EVENT_QP_REQ_ERR, 0);
178 iwch_ep_disconnect(qhp->ep, 0, M_NOWAIT);
/* An erred READ response on the SQ means the peer rejected our read. */
183 /* Bad incoming Read request */
184 if (SQ_TYPE(rsp_msg->cqe) &&
185 (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
186 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
/* An erred RDMA WRITE on the RQ: remote wrote where it should not. */
190 /* Bad incoming write */
191 if (RQ_TYPE(rsp_msg->cqe) &&
192 (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
193 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
/* Map the remaining CQE statuses onto ib_event severities. */
197 switch (CQE_STATUS(rsp_msg->cqe)) {
199 /* Completion Events */
200 case TPT_ERR_SUCCESS:
203 * Confirm the destination entry if this is a RECV completion.
/*
 * NOTE(review): the comment above says RECV, but the visible test is
 * SQ_TYPE() — worth confirming against the upstream source.
 */
205 if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
206 dst_confirm(qhp->ep->dst);
208 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
/* STAG/MR violations: access error, affiliated with this QP. */
217 case TPT_ERR_INVALIDATE_SHARED_MR:
218 case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
219 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
222 /* Device Fatal Errors */
224 case TPT_ERR_ECC_PSTAG:
225 case TPT_ERR_INTERNAL_ERR:
226 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
229 /* QP Fatal Errors */
230 case TPT_ERR_OUT_OF_RQE:
231 case TPT_ERR_PBL_ADDR_BOUND:
/* DDP/RDMAP protocol violations — unrecoverable for this QP. */
234 case TPT_ERR_PDU_LEN_ERR:
235 case TPT_ERR_DDP_VERSION:
236 case TPT_ERR_RDMA_VERSION:
238 case TPT_ERR_DDP_QUEUE_NUM:
/* Message-sequence-number and RQE addressing violations. */
242 case TPT_ERR_MSN_GAP:
243 case TPT_ERR_MSN_RANGE:
244 case TPT_ERR_RQE_ADDR_BOUND:
245 case TPT_ERR_IRD_OVERFLOW:
246 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
/* Unknown hardware status: log it and treat it as QP-fatal. */
250 log(LOG_ERR,"Unknown T3 status 0x%x QPID 0x%x\n",
251 CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
252 post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
/*
 * Drop the CQ refcount taken earlier; the body run when it reaches
 * zero (presumably a wakeup for a waiting destroyer) is not visible.
 */
256 mtx_lock(&chp->lock);
257 if (--chp->refcnt == 0)
259 mtx_unlock(&chp->lock);
/* Release the QP reference taken at the top of the function. */
260 iwch_qp_rem_ref(&qhp->ibqp);