sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_ev.c
/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/ioccom.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/proc.h>
#include <sys/libkern.h>

#include <netinet/in.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/idr.h>
#include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>

#include <cxgb_include.h>
#include <ulp/iw_cxgb/iw_cxgb_wr.h>
#include <ulp/iw_cxgb/iw_cxgb_hal.h>
#include <ulp/iw_cxgb/iw_cxgb_provider.h>
#include <ulp/iw_cxgb/iw_cxgb_cm.h>
#include <ulp/iw_cxgb/iw_cxgb.h>
#include <ulp/iw_cxgb/iw_cxgb_resource.h>
#include <ulp/iw_cxgb/iw_cxgb_user.h>

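/*
 * Report an affiliated async error to the consumer: log the offending CQE,
 * move an RTS QP to TERMINATE (optionally posting a TERMINATE message),
 * deliver the IB async event through the QP's event handler, and signal
 * the CQ's completion handler.  Unaffiliated errors (no QP) are only logged.
 */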
static void
post_qp_event(struct iwch_dev *rnicp, struct iwch_qp *qhp, struct iwch_cq *chp,
                struct respQ_msg_t *rsp_msg,
                enum ib_event_type ib_event,
                int send_term)
{
        struct ib_event event;
        struct iwch_qp_attributes attrs;

        mtx_lock(&rnicp->lock);

        if (!qhp) {
                CTR3(KTR_IW_CXGB, "%s unaffiliated error 0x%x qpid 0x%x",
                       __func__, CQE_STATUS(rsp_msg->cqe),
                       CQE_QPID(rsp_msg->cqe));
                mtx_unlock(&rnicp->lock);
                return;
        }

        if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
            (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
                CTR4(KTR_IW_CXGB, "%s AE received after RTS - "
                     "qp state %d qpid 0x%x status 0x%x", __func__,
                     qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
                mtx_unlock(&rnicp->lock);
                return;
        }

        log(LOG_ERR, "%s - AE qpid 0x%x opcode %d status 0x%x "
               "type %d wrid.hi 0x%x wrid.lo 0x%x\n", __func__,
               CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
               CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
               CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));

        mtx_unlock(&rnicp->lock);

        if (qhp->attr.state == IWCH_QP_STATE_RTS) {
                attrs.next_state = IWCH_QP_STATE_TERMINATE;
                iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
                               &attrs, 1);
                if (send_term)
                        iwch_post_terminate(qhp, rsp_msg);
        }

        event.event = ib_event;
        event.device = chp->ibcq.device;
        if (ib_event == IB_EVENT_CQ_ERR)
                event.element.cq = &chp->ibcq;
        else
                event.element.qp = &qhp->ibqp;

        if (qhp->ibqp.event_handler)
                (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

        (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
}

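/*
 * Dispatch an async event (AE) message from the adapter's response queue:
 * look up the CQ and QP named in the CQE, take a reference on each, and
 * translate the CQE opcode/status into the appropriate action (endpoint
 * disconnect and/or IB event) before dropping the references.
 */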
void
iwch_ev_dispatch(struct iwch_dev *rnicp, struct mbuf *m)
{
        struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) m->m_data;
        struct iwch_cq *chp;
        struct iwch_qp *qhp;
        u32 cqid = RSPQ_CQID(rsp_msg);

        mtx_lock(&rnicp->lock);
        chp = get_chp(rnicp, cqid);
        qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
        if (!chp || !qhp) {
                log(LOG_ERR, "BAD AE cqid 0x%x qpid 0x%x opcode %d "
                       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
                       cqid, CQE_QPID(rsp_msg->cqe),
                       CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
                       CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
                       CQE_WRID_LOW(rsp_msg->cqe));
                mtx_unlock(&rnicp->lock);
                return;
        }
        iwch_qp_add_ref(&qhp->ibqp);
        mtx_lock(&chp->lock);
        ++chp->refcnt;
        mtx_unlock(&chp->lock);
        mtx_unlock(&rnicp->lock);
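        /*
         * The QP and CQ references taken above are dropped at the "done"
         * label below.
         */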

        /*
         * A TERMINATE CQE with zero status indicates either:
         * 1) completion of our sending a TERMINATE, or
         * 2) an incoming TERMINATE message.
         */
        if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
            (CQE_STATUS(rsp_msg->cqe) == 0)) {
                if (SQ_TYPE(rsp_msg->cqe)) {
                        CTR3(KTR_IW_CXGB, "%s QPID 0x%x ep %p disconnecting",
                             __func__, qhp->wq.qpid, qhp->ep);
                        iwch_ep_disconnect(qhp->ep, 0, M_NOWAIT);
                } else {
                        CTR2(KTR_IW_CXGB, "%s post REQ_ERR AE QPID 0x%x",
                             __func__, qhp->wq.qpid);
                        post_qp_event(rnicp, qhp, chp, rsp_msg,
                                      IB_EVENT_QP_REQ_ERR, 0);
                        iwch_ep_disconnect(qhp->ep, 0, M_NOWAIT);
                }
                goto done;
        }

        /* Bad incoming Read request */
        if (SQ_TYPE(rsp_msg->cqe) &&
            (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
                post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
                goto done;
        }

        /* Bad incoming write */
        if (RQ_TYPE(rsp_msg->cqe) &&
            (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
                post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
                goto done;
        }

        switch (CQE_STATUS(rsp_msg->cqe)) {

        /* Completion Events */
        case TPT_ERR_SUCCESS:
#if 0
                /*
                 * Confirm the destination entry if this is a RECV completion.
                 */
                if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
                        dst_confirm(qhp->ep->dst);
#endif
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                break;

        /* QP Access Errors */
        case TPT_ERR_STAG:
        case TPT_ERR_PDID:
        case TPT_ERR_QPID:
        case TPT_ERR_ACCESS:
        case TPT_ERR_WRAP:
        case TPT_ERR_BOUND:
        case TPT_ERR_INVALIDATE_SHARED_MR:
        case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
                break;

        /* Device Fatal Errors */
        case TPT_ERR_ECC:
        case TPT_ERR_ECC_PSTAG:
        case TPT_ERR_INTERNAL_ERR:
                post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
                break;

        /* QP Fatal Errors */
        case TPT_ERR_OUT_OF_RQE:
        case TPT_ERR_PBL_ADDR_BOUND:
        case TPT_ERR_CRC:
        case TPT_ERR_MARKER:
        case TPT_ERR_PDU_LEN_ERR:
        case TPT_ERR_DDP_VERSION:
        case TPT_ERR_RDMA_VERSION:
        case TPT_ERR_OPCODE:
        case TPT_ERR_DDP_QUEUE_NUM:
        case TPT_ERR_MSN:
        case TPT_ERR_TBIT:
        case TPT_ERR_MO:
        case TPT_ERR_MSN_GAP:
        case TPT_ERR_MSN_RANGE:
        case TPT_ERR_RQE_ADDR_BOUND:
        case TPT_ERR_IRD_OVERFLOW:
                post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
                break;

        default:
                log(LOG_ERR, "Unknown T3 status 0x%x QPID 0x%x\n",
                       CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
                post_qp_event(rnicp, qhp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
                break;
        }
done:
        mtx_lock(&chp->lock);
        if (--chp->refcnt == 0)
                wakeup(chp);
        mtx_unlock(&chp->lock);
        iwch_qp_rem_ref(&qhp->ibqp);
}
#endif /* TCP_OFFLOAD */