1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
39 #include <sys/pciio.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
43 #include <sys/bus_dma.h>
45 #include <sys/ioccom.h>
47 #include <sys/rwlock.h>
48 #include <sys/linker.h>
49 #include <sys/firmware.h>
50 #include <sys/socket.h>
51 #include <sys/socketvar.h>
52 #include <sys/sockio.h>
54 #include <sys/sysctl.h>
55 #include <sys/syslog.h>
56 #include <sys/queue.h>
57 #include <sys/taskqueue.h>
61 #include <net/route.h>
62 #include <netinet/in_systm.h>
63 #include <netinet/in.h>
64 #include <netinet/in_pcb.h>
65 #include <netinet/ip.h>
66 #include <netinet/ip_var.h>
67 #include <netinet/tcp_var.h>
68 #include <netinet/tcp.h>
69 #include <netinet/tcpip.h>
71 #include <rdma/ib_verbs.h>
72 #include <linux/idr.h>
73 #include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>
75 #include <cxgb_include.h>
76 #include <ulp/tom/cxgb_tom.h>
77 #include <ulp/tom/cxgb_toepcb.h>
78 #include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>
79 #include <rdma/ib_verbs.h>
80 #include <linux/idr.h>
82 #include <ulp/iw_cxgb/iw_cxgb_wr.h>
83 #include <ulp/iw_cxgb/iw_cxgb_hal.h>
84 #include <ulp/iw_cxgb/iw_cxgb_provider.h>
85 #include <ulp/iw_cxgb/iw_cxgb_cm.h>
86 #include <ulp/iw_cxgb/iw_cxgb.h>
/*
 * Human-readable names for enum iwch_ep_state, indexed by state value;
 * used only in CTR trace messages below.  (Initializer not visible in
 * this view of the file.)
 */
static char *states[] = {
/* Parent sysctl node: all hw.iw_cxgb.* tunables below hang off this. */
SYSCTL_NODE(_hw, OID_AUTO, iw_cxgb, CTLFLAG_RD, 0, "iw_cxgb driver parameters");
/*
 * Loader tunables / run-time sysctls controlling CM behavior.  Each value
 * is settable from loader.conf via TUNABLE_INT and at run time via
 * sysctl hw.iw_cxgb.*.
 */
static int ep_timeout_secs = 60;
TUNABLE_INT("hw.iw_cxgb.ep_timeout_secs", &ep_timeout_secs);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, ep_timeout_secs, CTLFLAG_RW, &ep_timeout_secs, 0,
    "CM Endpoint operation timeout in seconds (default=60)");
static int mpa_rev = 1;
TUNABLE_INT("hw.iw_cxgb.mpa_rev", &mpa_rev);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
    "MPA Revision, 0 supports amso1100, 1 is spec compliant. (default=1)");
static int markers_enabled = 0;
TUNABLE_INT("hw.iw_cxgb.markers_enabled", &markers_enabled);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, markers_enabled, CTLFLAG_RW, &markers_enabled, 0,
    "Enable MPA MARKERS (default(0)=disabled)");
static int crc_enabled = 1;
TUNABLE_INT("hw.iw_cxgb.crc_enabled", &crc_enabled);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, crc_enabled, CTLFLAG_RW, &crc_enabled, 0,
    "Enable MPA CRC (default(1)=enabled)");
static int rcv_win = 256 * 1024;
TUNABLE_INT("hw.iw_cxgb.rcv_win", &rcv_win);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, rcv_win, CTLFLAG_RW, &rcv_win, 0,
    "TCP receive window in bytes (default=256KB)");
static int snd_win = 32 * 1024;
TUNABLE_INT("hw.iw_cxgb.snd_win", &snd_win);
SYSCTL_INT(_hw_iw_cxgb, OID_AUTO, snd_win, CTLFLAG_RW, &snd_win, 0,
    "TCP send window in bytes (default=32KB)");
/* NOTE(review): unsigned variables registered via TUNABLE_INT (signed) —
 * works in practice but TUNABLE_UINT would match SYSCTL_UINT; confirm. */
static unsigned int nocong = 0;
TUNABLE_INT("hw.iw_cxgb.nocong", &nocong);
SYSCTL_UINT(_hw_iw_cxgb, OID_AUTO, nocong, CTLFLAG_RW, &nocong, 0,
    "Turn off congestion control (default=0)");
static unsigned int cong_flavor = 1;
TUNABLE_INT("hw.iw_cxgb.cong_flavor", &cong_flavor);
SYSCTL_UINT(_hw_iw_cxgb, OID_AUTO, cong_flavor, CTLFLAG_RW, &cong_flavor, 0,
    "TCP Congestion control flavor (default=1)");
/* Forward declarations for routines used before their definitions. */
static void ep_timeout(void *arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);
static int iwch_so_upcall(struct socket *so, void *arg, int waitflag);
 * Cruft to offload socket upcalls onto thread.
/*
 * Socket upcalls run in constrained context, so endpoints needing work
 * are queued on req_list (under req_lock) and serviced by process_req()
 * running on the iw_cxgb taskqueue thread.
 */
static struct mtx req_lock;
static TAILQ_HEAD(iwch_ep_list, iwch_ep_common) req_list;
static struct task iw_cxgb_task;
static struct taskqueue *iw_cxgb_taskq;
static void process_req(void *ctx, int pending);
/*
 * (Re)arm the endpoint's MPA/close watchdog for ep_timeout_secs seconds.
 * If a timer is already pending it is drained first and then restarted.
 */
start_ep_timer(struct iwch_ep *ep)
	CTR2(KTR_IW_CXGB, "%s ep %p", __FUNCTION__, ep);
	if (callout_pending(&ep->timer)) {
		CTR2(KTR_IW_CXGB, "%s stopped / restarted timer ep %p", __FUNCTION__, ep);
		callout_deactivate(&ep->timer);
		/* callout_drain can sleep; presumably safe in this context — confirm. */
		callout_drain(&ep->timer);
	 * XXX this looks racy
	/* NOTE(review): re-initializing a live callout here is the race the
	 * XXX above refers to; left as-is. */
	callout_init(&ep->timer, TRUE);
	callout_reset(&ep->timer, ep_timeout_secs * hz, ep_timeout, ep);
/*
 * Cancel the endpoint watchdog.  Logs (but tolerates) the case where the
 * timer was not actually pending.
 */
stop_ep_timer(struct iwch_ep *ep)
	CTR2(KTR_IW_CXGB, "%s ep %p", __FUNCTION__, ep);
	if (!callout_pending(&ep->timer)) {
		CTR3(KTR_IW_CXGB, "%s timer stopped when its not running! ep %p state %u\n",
		    __func__, ep, ep->com.state);
	callout_drain(&ep->timer);
/*
 * Snapshot TOE/TCP connection state (hw tid, send/recv sequence numbers,
 * emss) from the socket's tcpcb into the endpoint.  Fails if the
 * connection is not offloaded (TF_TOE clear).
 * NOTE: the derivation of 'tp' and 'toep' from 'inp' is not visible in
 * this view of the file.
 */
set_tcpinfo(struct iwch_ep *ep)
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	if ((tp->t_flags & TF_TOE) == 0) {
		printf("%s: connection NOT OFFLOADED!\n", __func__);
	ep->hwtid = toep->tp_tid;
	ep->snd_seq = tp->snd_nxt;
	ep->rcv_seq = tp->rcv_nxt;
	/* emss taken directly from t_maxseg; marker/header overhead not
	 * subtracted here — adjustment, if any, is not visible. */
	ep->emss = tp->t_maxseg;
/* Return the endpoint's current state, sampled under epc->lock. */
static enum iwch_ep_state
state_read(struct iwch_ep_common *epc)
	enum iwch_ep_state state;
	mtx_lock(&epc->lock);
	mtx_unlock(&epc->lock);
/* Set epc->state; caller must hold epc->lock (the "__" variant). */
__state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
/* Locked wrapper around __state_set() with a state-transition trace. */
state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
	mtx_lock(&epc->lock);
	CTR3(KTR_IW_CXGB, "%s - %s -> %s", __FUNCTION__, states[epc->state], states[new]);
	__state_set(epc, new);
	mtx_unlock(&epc->lock);
/*
 * Allocate and zero an endpoint of 'size' bytes (iwch_ep or
 * iwch_listen_ep), initializing its refcount (to 1), lock and condvar.
 * 'flags' is passed to malloc(9) (e.g. M_NOWAIT), so the result may be
 * NULL — callers check.
 */
alloc_ep(int size, int flags)
	struct iwch_ep_common *epc;
	epc = malloc(size, M_DEVBUF, flags);
	memset(epc, 0, size);
	refcount_init(&epc->refcount, 1);
	/* MTX_DUPOK: parent and child ep locks may be held together. */
	mtx_init(&epc->lock, "iwch_epc lock", NULL, MTX_DEF|MTX_DUPOK);
	cv_init(&epc->waitq, "iwch_epc cv");
	CTR2(KTR_IW_CXGB, "%s alloc ep %p", __FUNCTION__, epc);
/*
 * Final destructor, called when the last reference is dropped.  Asserts
 * the socket is gone and the endpoint is off the upcall request list.
 */
void __free_ep(struct iwch_ep_common *epc)
	CTR3(KTR_IW_CXGB, "%s ep %p state %s", __FUNCTION__, epc, states[state_read(epc)]);
	KASSERT(!epc->so, ("%s warning ep->so %p \n", __FUNCTION__, epc->so));
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list!\n", __FUNCTION__, epc));
/*
 * Look up a route to peer_ip and return the rtentry (caller releases).
 * Only the destination address is used in the visible code; local_ip,
 * ports and tos are accepted but presumably unused here — confirm.
 * NOTE: the actual route-allocation call is not visible in this view.
 */
static struct rtentry *
find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
	   __be16 peer_port, u8 tos)
	struct route iproute;
	struct sockaddr_in *dst = (struct sockaddr_in *)&iproute.ro_dst;
	bzero(&iproute, sizeof iproute);
	dst->sin_family = AF_INET;
	dst->sin_len = sizeof *dst;
	dst->sin_addr.s_addr = peer_ip;
	return iproute.ro_rt;
/*
 * Detach the receive upcall from epc->so and shut the socket down in
 * both directions.  'close' selects between variants whose difference
 * is not visible in this view — callers pass 0 or 1.
 */
close_socket(struct iwch_ep_common *epc, int close)
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, epc, epc->so, states[epc->state]);
	/* soupcall_clear requires the socket receive lock; the matching
	 * SOCK_LOCK is not visible above. */
	soupcall_clear(epc->so, SO_RCV);
	SOCK_UNLOCK(epc->so);
	soshutdown(epc->so, SHUT_WR|SHUT_RD);
/* Half-close: stop sending on the endpoint's socket (graceful FIN). */
shutdown_socket(struct iwch_ep_common *epc)
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, epc, epc->so, states[epc->state]);
	soshutdown(epc->so, SHUT_WR);
/*
 * Force an abortive close: set SO_LINGER with l_linger = 0 so that the
 * subsequent close sends an RST instead of a graceful FIN.
 */
abort_socket(struct iwch_ep *ep)
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	err = sosetopt(ep->com.so, &sopt);
	/* Failure is logged but not propagated: the close will be graceful. */
		printf("%s can't set linger to 0, no RST! err %d\n", __FUNCTION__, err);
/*
 * Build and transmit the MPA connection request (key, CRC/marker flags,
 * revision, private data staged in ep->mpa_pkt) as streaming-mode TCP
 * data, then move the endpoint to MPA_REQ_SENT.
 */
send_mpa_req(struct iwch_ep *ep)
	struct mpa_message *mpa;
	CTR3(KTR_IW_CXGB, "%s ep %p pd_len %d", __FUNCTION__, ep, ep->plen);
	mpalen = sizeof(*mpa) + ep->plen;
	m = m_gethdr(mpalen, M_NOWAIT);
	/* mbuf allocation failure is reported to the ULP as -ENOMEM. */
	connect_reply_upcall(ep, -ENOMEM);
	mpa = mtod(m, struct mpa_message *);
	m->m_pkthdr.len = mpalen;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;
	memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
	/* sosend consumes the mbuf on both success and failure. */
	err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
	/* NOTE(review): any sosend error is reported upward as -ENOMEM,
	 * masking the real errno — consider propagating err. */
	connect_reply_upcall(ep, -ENOMEM);
	state_set(&ep->com, MPA_REQ_SENT);
/*
 * Send an MPA reply carrying the MPA_REJECT flag plus up to 'plen' bytes
 * of private data, refusing the peer's connection request.
 */
send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
	struct mpa_message *mpa;
	CTR3(KTR_IW_CXGB, "%s ep %p plen %d", __FUNCTION__, ep, plen);
	mpalen = sizeof(*mpa) + plen;
	m = m_gethdr(mpalen, M_NOWAIT);
	printf("%s - cannot alloc mbuf!\n", __FUNCTION__);
	mpa = mtod(m, struct mpa_message *);
	m->m_pkthdr.len = mpalen;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	memcpy(mpa->private_data, pdata, plen);
	err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
/*
 * Send the accepting MPA reply: MPA_KEY_REP, the negotiated CRC flag
 * (from ep->mpa_attr, set while parsing the request), our marker
 * preference, and the ULP's private data.  Moves state to MPA_REP_SENT
 * before the send; returns the sosend error code.
 */
send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
	struct mpa_message *mpa;
	CTR4(KTR_IW_CXGB, "%s ep %p so %p plen %d", __FUNCTION__, ep, ep->com.so, plen);
	mpalen = sizeof(*mpa) + plen;
	m = m_gethdr(mpalen, M_NOWAIT);
	printf("%s - cannot alloc mbuf!\n", __FUNCTION__);
	mpa = mtod(m, struct mpa_message *);
	m->m_pkthdr.len = mpalen;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	memcpy(mpa->private_data, pdata, plen);
	/* State is advanced before sosend; a send failure is returned to
	 * the caller (iwch_accept_cr) to unwind. */
	state_set(&ep->com, MPA_REP_SENT);
	return sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
/*
 * Deliver IW_CM_EVENT_CLOSE to the ULP (if a cm_id is attached) and drop
 * the CM's reference on the cm_id, clearing ep->com.cm_id.
 */
close_complete_upcall(struct iwch_ep *ep)
	struct iw_cm_event event;
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	CTR3(KTR_IW_CXGB, "close complete delivered ep %p cm_id %p tid %d",
	    ep, ep->com.cm_id, ep->hwtid);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	ep->com.cm_id->rem_ref(ep->com.cm_id);
	ep->com.cm_id = NULL;
/*
 * Abortively tear down the connection: mark ABORTING, close the socket,
 * notify the ULP via close_complete_upcall(), and mark the ep DEAD.
 */
abort_connection(struct iwch_ep *ep)
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	state_set(&ep->com, ABORTING);
	close_socket(&ep->com, 0);
	close_complete_upcall(ep);
	state_set(&ep->com, DEAD);
/* Deliver IW_CM_EVENT_DISCONNECT to the ULP when the peer half-closes. */
peer_close_upcall(struct iwch_ep *ep)
	struct iw_cm_event event;
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	CTR3(KTR_IW_CXGB, "peer close delivered ep %p cm_id %p tid %d",
	    ep, ep->com.cm_id, ep->hwtid);
	/* Reference on cm_id is retained; only CLOSE/abort paths drop it. */
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
/*
 * Deliver IW_CM_EVENT_CLOSE with status ECONNRESET when the peer aborts,
 * then release the CM's cm_id reference.
 */
peer_abort_upcall(struct iwch_ep *ep)
	struct iw_cm_event event;
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = ECONNRESET;
	CTR3(KTR_IW_CXGB, "abort delivered ep %p cm_id %p tid %d", ep,
	    ep->com.cm_id, ep->hwtid);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	ep->com.cm_id->rem_ref(ep->com.cm_id);
	ep->com.cm_id = NULL;
/*
 * Deliver IW_CM_EVENT_CONNECT_REPLY to the active-side ULP with 'status'
 * (0 = accepted).  Private data from the peer's MPA reply is attached
 * when status is 0 or ECONNREFUSED.  On failure statuses the cm_id
 * reference is dropped.
 */
connect_reply_upcall(struct iwch_ep *ep, int status)
	struct iw_cm_event event;
	CTR5(KTR_IW_CXGB, "%s ep %p so %p state %s status %d", __FUNCTION__, ep, ep->com.so, states[ep->com.state], status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	/* NOTE(review): callers pass both positive and negative errno
	 * values (e.g. -ECONNRESET vs ECONNREFUSED here) — confirm the
	 * intended sign convention. */
	if ((status == 0) || (status == ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	CTR4(KTR_IW_CXGB, "%s ep %p tid %d status %d", __FUNCTION__, ep,
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	ep->com.cm_id->rem_ref(ep->com.cm_id);
	ep->com.cm_id = NULL;
/*
 * Deliver IW_CM_EVENT_CONNECT_REQUEST to the listening endpoint's ULP
 * once a complete MPA request has been received on a child connection.
 * The child ep is passed as provider_data; the parent's reference taken
 * for this delivery is dropped afterwards via put_ep().
 */
connect_request_upcall(struct iwch_ep *ep)
	struct iw_cm_event event;
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	event.so = ep->com.so;
	/* Only deliver if the listener has not already been destroyed. */
	if (state_read(&ep->parent_ep->com) != DEAD) {
		ep->parent_ep->com.cm_id->event_handler(
		    ep->parent_ep->com.cm_id,
	put_ep(&ep->parent_ep->com);
/* Deliver IW_CM_EVENT_ESTABLISHED once the passive side reaches FPDU mode. */
established_upcall(struct iwch_ep *ep)
	struct iw_cm_event event;
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	CTR3(KTR_IW_CXGB, "%s ep %p tid %d", __FUNCTION__, ep, ep->hwtid);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
/*
 * Active side: drain streaming data from the socket, accumulate it in
 * ep->mpa_pkt, and once a complete, valid MPA reply (header + private
 * data) is present, negotiate CRC/markers, snapshot TCP state, move the
 * QP to RTS and deliver the connect-reply upcall.  Bails early (waiting
 * for more data) when the message is incomplete.
 */
process_mpa_reply(struct iwch_ep *ep)
	struct mpa_message *mpa;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct mbuf *top, *m;
	int flags = MSG_DONTWAIT;
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	if (state_read(&ep->com) != MPA_REQ_SENT)
	/* Non-blocking receive of everything currently queued. */
	uio.uio_resid = len = 1000000;
	uio.uio_td = ep->com.thread;
	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
	if (err == EWOULDBLOCK) {
	/* Sanity: soreceive should have drained the receive buffer. */
	if (ep->com.so->so_rcv.sb_mb) {
		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
		    __FUNCTION__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
	 * copy the new data into our accumulation buffer.
	m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
	ep->mpa_pkt_len += m->m_len;
	 * if we don't even have the mpa message, then bail.
	if (ep->mpa_pkt_len < sizeof(*mpa))
	mpa = (struct mpa_message *)ep->mpa_pkt;
	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		CTR2(KTR_IW_CXGB, "%s bad mpa rev %d", __FUNCTION__, mpa->revision);
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		CTR2(KTR_IW_CXGB, "%s bad mpa key |%16s|", __FUNCTION__, mpa->key);
	plen = ntohs(mpa->private_data_size);
	 * Fail if there's too much private data.
	if (plen > MPA_MAX_PRIVATE_DATA) {
		CTR2(KTR_IW_CXGB, "%s plen too big %d", __FUNCTION__, plen);
	 * If plen does not account for pkt size
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		CTR2(KTR_IW_CXGB, "%s pkt too big %d", __FUNCTION__, ep->mpa_pkt_len);
	ep->plen = (u8) plen;
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
	if (mpa->flags & MPA_REJECT) {
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	CTR1(KTR_IW_CXGB, "%s mpa rpl looks good!", __FUNCTION__);
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.initiator = 1;
	/* NOTE(review): '|' binds tighter than '?:', so this evaluates as
	 * ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0 — CRC is enabled
	 * whenever our local tunable is set, regardless of the peer's
	 * reply flag.  Likely intended '&&'; confirm against the MPA
	 * negotiation rules before changing. */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	if (set_tcpinfo(ep)) {
		printf("%s set_tcpinfo error\n", __FUNCTION__);
	CTR5(KTR_IW_CXGB, "%s - crc_enabled=%d, recv_marker_enabled=%d, "
	    "xmit_marker_enabled=%d, version=%d", __FUNCTION__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;
	mask = IWCH_QP_ATTR_NEXT_STATE |
	    IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	    IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;
	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
	    ep->com.qp, mask, &attrs, 1);
	/* Error path: abort the connection and report 'err' to the ULP. */
	abort_connection(ep);
	connect_reply_upcall(ep, err);
/*
 * Passive side: accumulate and validate the peer's MPA request from the
 * socket.  Once complete, record negotiated attributes, snapshot TCP
 * state, move to MPA_REQ_RCVD and deliver the connect-request upcall to
 * the listener's ULP.  Bails (waiting for more data) on short reads.
 */
process_mpa_request(struct iwch_ep *ep)
	struct mpa_message *mpa;
	int flags = MSG_DONTWAIT;
	struct mbuf *top, *m;
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	if (state_read(&ep->com) != MPA_REQ_WAIT)
	/* Non-blocking receive of everything currently queued. */
	uio.uio_resid = len = 1000000;
	uio.uio_td = ep->com.thread;
	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
	if (err == EWOULDBLOCK) {
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
		CTR2(KTR_IW_CXGB, "%s mpa message too big %d", __FUNCTION__,
		    ep->mpa_pkt_len + m->m_len);
	 * Copy the new data into our accumulation buffer.
	m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
	ep->mpa_pkt_len += m->m_len;
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	if (ep->mpa_pkt_len < sizeof(*mpa)) {
		CTR2(KTR_IW_CXGB, "%s not enough header %d...waiting...", __FUNCTION__,
	mpa = (struct mpa_message *) ep->mpa_pkt;
	 * Validate MPA Header.
	if (mpa->revision != mpa_rev) {
		CTR2(KTR_IW_CXGB, "%s bad mpa rev %d", __FUNCTION__, mpa->revision);
	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		CTR2(KTR_IW_CXGB, "%s bad mpa key |%16s|", __FUNCTION__, mpa->key);
	plen = ntohs(mpa->private_data_size);
	 * Fail if there's too much private data.
	if (plen > MPA_MAX_PRIVATE_DATA) {
		CTR2(KTR_IW_CXGB, "%s plen too big %d", __FUNCTION__, plen);
	 * If plen does not account for pkt size
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		CTR2(KTR_IW_CXGB, "%s more data after private data %d", __FUNCTION__,
	ep->plen = (u8) plen;
	 * If we don't have all the pdata yet, then bail.
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
		CTR2(KTR_IW_CXGB, "%s more mpa msg to come %d", __FUNCTION__,
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	ep->mpa_attr.initiator = 0;
	/* NOTE(review): same '|' vs '?:' precedence hazard as in
	 * process_mpa_reply() — evaluates as ((flags & MPA_CRC) |
	 * crc_enabled) ? 1 : 0; likely intended '&&'.  Confirm. */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	if (set_tcpinfo(ep)) {
		printf("%s set_tcpinfo error\n", __FUNCTION__);
	CTR5(KTR_IW_CXGB, "%s - crc_enabled=%d, recv_marker_enabled=%d, "
	    "xmit_marker_enabled=%d, version=%d", __FUNCTION__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
	state_set(&ep->com, MPA_REQ_RCVD);
	/* Hand the request to the listener's ULP... */
	connect_request_upcall(ep);
	/* ...or abort on the error path. */
	abort_connection(ep);
/*
 * Handle the peer's FIN.  Advances the state machine under ep->com.lock:
 * most states move to CLOSING (notifying the ULP as appropriate); a FIN
 * while already CLOSING moves the QP toward IDLE and the ep to MORIBUND
 * or DEAD.  May call iwch_ep_disconnect() after dropping the lock.
 */
process_peer_close(struct iwch_ep *ep)
	struct iwch_qp_attributes attrs;
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	mtx_lock(&ep->com.lock);
	switch (ep->com.state) {
	__state_set(&ep->com, CLOSING);
	__state_set(&ep->com, CLOSING);
	/* FIN during MPA exchange: connection attempt failed. */
	connect_reply_upcall(ep, -ECONNRESET);
	 * We're gonna mark this puppy DEAD, but keep
	 * the reference on it until the ULP accepts or
	__state_set(&ep->com, CLOSING);
	__state_set(&ep->com, CLOSING);
	/* FPDU mode: move the QP to CLOSING and tell the ULP. */
	__state_set(&ep->com, CLOSING);
	attrs.next_state = IWCH_QP_STATE_CLOSING;
	iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
	    IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
	peer_close_upcall(ep);
	__state_set(&ep->com, MORIBUND);
	/* Both sides closed: idle the QP and finish teardown. */
	if (ep->com.cm_id && ep->com.qp) {
		attrs.next_state = IWCH_QP_STATE_IDLE;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
		    IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
	close_socket(&ep->com, 0);
	close_complete_upcall(ep);
	__state_set(&ep->com, DEAD);
	mtx_unlock(&ep->com.lock);
	iwch_ep_disconnect(ep, 0, M_NOWAIT);
/*
 * Handle a fatal socket error / peer RST.  Depending on state: fail a
 * pending connect, wake an rpl waiter, move an active QP to ERROR and
 * deliver the peer-abort upcall; finally close the socket and mark DEAD
 * unless already ABORTING.
 */
process_conn_error(struct iwch_ep *ep)
	struct iwch_qp_attributes attrs;
	mtx_lock(&ep->com.lock);
	CTR3(KTR_IW_CXGB, "%s ep %p state %u", __func__, ep, ep->com.state);
	switch (ep->com.state) {
	connect_reply_upcall(ep, -ECONNRESET);
	/* Wake anyone blocked waiting for an MPA reply. */
	ep->com.rpl_err = ECONNRESET;
	CTR1(KTR_IW_CXGB, "waking up ep %p", ep);
	 * We're gonna mark this puppy DEAD, but keep
	 * the reference on it until the ULP accepts or
	if (ep->com.cm_id && ep->com.qp) {
		attrs.next_state = IWCH_QP_STATE_ERROR;
		ret = iwch_modify_qp(ep->com.qp->rhp,
		    ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
		    "%s - qp <- error failed!\n",
	peer_abort_upcall(ep);
	mtx_unlock(&ep->com.lock);
	/* Error arriving after the ep is already DEAD is only logged. */
	CTR2(KTR_IW_CXGB, "%s so_error %d IN DEAD STATE!!!!", __FUNCTION__,
	    ep->com.so->so_error);
	if (ep->com.state != ABORTING) {
		close_socket(&ep->com, 0);
		__state_set(&ep->com, DEAD);
	mtx_unlock(&ep->com.lock);
/*
 * Our side's close has fully completed.  CLOSING -> MORIBUND; in
 * MORIBUND, idle the QP (if bound), close the socket (graceful when a
 * cm_id/qp pair exists), deliver the close-complete upcall and go DEAD.
 */
process_close_complete(struct iwch_ep *ep)
	struct iwch_qp_attributes attrs;
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	/* The cm_id may be null if we failed to connect */
	mtx_lock(&ep->com.lock);
	switch (ep->com.state) {
	__state_set(&ep->com, MORIBUND);
	if ((ep->com.cm_id) && (ep->com.qp)) {
		attrs.next_state = IWCH_QP_STATE_IDLE;
		iwch_modify_qp(ep->com.qp->rhp,
		    IWCH_QP_ATTR_NEXT_STATE,
	close_socket(&ep->com, 1);
	close_socket(&ep->com, 0);
	close_complete_upcall(ep);
	__state_set(&ep->com, DEAD);
	mtx_unlock(&ep->com.lock);
1059 * T3A does 3 things when a TERM is received:
1060 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
1061 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
1064 * For (1), we save the message in the qp for later consumer consumption.
1065 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
1066 * For (3), we toss the CQE in cxio_poll_cq().
1068 * terminate() handles case (1)...
/*
 * CPL_RDMA_TERMINATE handler (case 1 from the comment above): look up
 * the endpoint via the hardware tid and, if the connection is in FPDU
 * mode, save the TERM message payload in the QP's terminate buffer for
 * later consumption by the ULP.
 */
terminate(struct sge_qset *qs, struct rsp_desc *r, struct mbuf *m)
	struct adapter *sc = qs->adap;
	struct tom_data *td = sc->tom_softc;
	/* tid is packed into the response descriptor's hash word. */
	uint32_t hash = *((uint32_t *)r + 1);
	unsigned int tid = ntohl(hash) >> 8 & 0xfffff;
	struct toepcb *toep = lookup_tid(&td->tid_maps, tid);
	struct socket *so = toep->tp_inp->inp_socket;
	struct iwch_ep *ep = so->so_rcv.sb_upcallarg;
	if (state_read(&ep->com) != FPDU_MODE)
	/* Strip the CPL header; the remainder is the TERM payload. */
	m_adj(m, sizeof(struct cpl_rdma_terminate));
	CTR4(KTR_IW_CXGB, "%s: tid %u, ep %p, saved %d bytes",
	    __func__, tid, ep, m->m_len);
	m_copydata(m, 0, m->m_len, ep->com.qp->attr.terminate_buffer);
	ep->com.qp->attr.terminate_msg_len = m->m_len;
	ep->com.qp->attr.is_terminate_local = 0;
/*
 * CPL_RDMA_EC_STATUS handler: on a bad close status, move the QP to
 * ERROR and abort the connection.
 */
ec_status(struct sge_qset *qs, struct rsp_desc *r, struct mbuf *m)
	struct adapter *sc = qs->adap;
	struct tom_data *td = sc->tom_softc;
	struct cpl_rdma_ec_status *rep = mtod(m, void *);
	unsigned int tid = GET_TID(rep);
	struct toepcb *toep = lookup_tid(&td->tid_maps, tid);
	struct socket *so = toep->tp_inp->inp_socket;
	struct iwch_ep *ep = so->so_rcv.sb_upcallarg;
	struct iwch_qp_attributes attrs;
	CTR1(KTR_IW_CXGB, "%s BAD CLOSE - Aborting", __FUNCTION__);
	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(ep->com.qp->rhp,
	    IWCH_QP_ATTR_NEXT_STATE,
	abort_connection(ep);
/*
 * Callout fired when an MPA exchange or close takes longer than
 * ep_timeout_secs.  Moves the ep to ABORTING (reporting ETIMEDOUT for a
 * pending connect), pushes a bound QP to ERROR, and aborts the
 * connection.  Runs from callout context with ep->com.lock taken here.
 */
ep_timeout(void *arg)
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	mtx_lock(&ep->com.lock);
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	switch (ep->com.state) {
	__state_set(&ep->com, ABORTING);
	connect_reply_upcall(ep, -ETIMEDOUT);
	__state_set(&ep->com, ABORTING);
	if (ep->com.cm_id && ep->com.qp)
	__state_set(&ep->com, ABORTING);
	CTR3(KTR_IW_CXGB, "%s unexpected state ep %p state %u\n",
	    __func__, ep, ep->com.state);
	mtx_unlock(&ep->com.lock);
	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(ep->com.qp->rhp,
	    ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
	abort_connection(ep);
/*
 * ULP rejects an incoming connection request: send the MPA reject with
 * the ULP's private data, then shut the socket down.  Must be called in
 * MPA_REQ_RCVD; returns -ECONNRESET if the ep is already DEAD.
 */
iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
	struct iwch_ep *ep = to_ep(cm_id);
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	if (state_read(&ep->com) == DEAD) {
		return (-ECONNRESET);
	PANIC_IF(state_read(&ep->com) != MPA_REQ_RCVD);
	abort_connection(ep);
	/* NOTE(review): err from send_mpa_reject() is immediately
	 * overwritten by soshutdown()'s return — a failed reject send is
	 * silently lost.  Confirm whether that is intentional. */
	err = send_mpa_reject(ep, pdata, pdata_len);
	err = soshutdown(ep->com.so, 3);
/*
 * ULP accepts an incoming connection: validate ird/ord against device
 * limits, take a cm_id reference, bind the QP to the endpoint (move it
 * to RTS with the negotiated MPA attributes), send the MPA reply, then
 * enter FPDU_MODE and deliver the established upcall.  Error paths
 * (partially visible here) unwind the cm_id reference.
 */
iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	if (state_read(&ep->com) == DEAD) {
	PANIC_IF(state_read(&ep->com) != MPA_REQ_RCVD);
	/* Reject if the ULP asks for more RDMA reads than the HW supports. */
	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.rpl_err = 0;
	ep->com.rpl_done = 0;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	CTR3(KTR_IW_CXGB, "%s ird %d ord %d", __FUNCTION__, ep->ird, ep->ord);
	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;
	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	    IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	    IWCH_QP_ATTR_MPA_ATTR |
	    IWCH_QP_ATTR_MAX_IRD |
	    IWCH_QP_ATTR_MAX_ORD;
	err = iwch_modify_qp(ep->com.qp->rhp,
	    ep->com.qp, mask, &attrs, 1);
	err = send_mpa_reply(ep, conn_param->private_data,
	    conn_param->private_data_len);
	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	/* Error unwind: drop the reference taken above. */
	ep->com.cm_id = NULL;
	cm_id->rem_ref(cm_id);
/*
 * Prepare the ULP-supplied socket for CM use: install iwch_so_upcall as
 * the SO_RCV upcall (with epc as its argument), make the socket
 * non-blocking, and set TCP_NODELAY.  A sosetopt failure is logged only.
 */
static int init_sock(struct iwch_ep_common *epc)
	struct sockopt sopt;
	/* Requires the socket lock; the matching SOCK_LOCK is not visible
	 * in this view. */
	soupcall_set(epc->so, SO_RCV, iwch_so_upcall, epc);
	epc->so->so_state |= SS_NBIO;
	SOCK_UNLOCK(epc->so);
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;
	sopt.sopt_td = NULL;
	err = sosetopt(epc->so, &sopt);
		printf("%s can't set TCP_NODELAY err %d\n", __FUNCTION__, err);
/*
 * Return non-zero if the cm_id's remote address is local to this host
 * (i.e. a loopback connection, which cannot be offloaded).  The port is
 * temporarily zeroed so ifa_ifwithaddr_check() matches on address only.
 */
is_loopback_dst(struct iw_cm_id *cm_id)
	uint16_t port = cm_id->remote_addr.sin_port;
	cm_id->remote_addr.sin_port = 0;
	ifa_present = ifa_ifwithaddr_check(
	    (struct sockaddr *)&cm_id->remote_addr);
	cm_id->remote_addr.sin_port = port;
	return (ifa_present);
/*
 * Active connection setup: allocate an endpoint, stage the ULP's private
 * data for the MPA request, bind the QP and socket, verify the route
 * egresses through a TOE-capable interface, then soconnect().  Loopback
 * destinations are rejected up front (cannot be offloaded).
 */
iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct toedev *tdev;
	if (is_loopback_dst(cm_id)) {
	ep = alloc_ep(sizeof(*ep), M_NOWAIT);
		printf("%s - cannot alloc ep.\n", __FUNCTION__);
	callout_init(&ep->timer, TRUE);
	ep->plen = conn_param->private_data_len;
	/* Private data is staged after the MPA header in ep->mpa_pkt so
	 * send_mpa_req() can transmit header+pdata contiguously. */
	memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
	    conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	ep->com.thread = curthread;
	PANIC_IF(!ep->com.qp);
	CTR4(KTR_IW_CXGB, "%s qpn 0x%x qp %p cm_id %p", __FUNCTION__, conn_param->qpn,
	ep->com.so = cm_id->so;
	err = init_sock(&ep->com);
	rt = find_route(cm_id->local_addr.sin_addr.s_addr,
	    cm_id->remote_addr.sin_addr.s_addr,
	    cm_id->local_addr.sin_port,
	    cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
		printf("%s - cannot find route.\n", __FUNCTION__);
	/* NOTE(review): IFCAP_TOE is a capability bit; testing it against
	 * if_flags (rather than if_capenable) looks wrong — confirm. */
	if (!(rt->rt_ifp->if_flags & IFCAP_TOE)) {
		printf("%s - interface not TOE capable.\n", __FUNCTION__);
	tdev = TOEDEV(rt->rt_ifp);
		printf("%s - No toedev for interface.\n", __FUNCTION__);
	state_set(&ep->com, CONNECTING);
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;
	err = soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
/*
 * Create a listening endpoint on cm_id->so: allocate a listen ep, take a
 * cm_id reference, initialize the socket and solisten().  On success the
 * ep becomes cm_id->provider_data; on failure the socket is closed and
 * the reference dropped.
 */
iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
	struct iwch_listen_ep *ep;
	ep = alloc_ep(sizeof(*ep), M_NOWAIT);
		printf("%s - cannot alloc ep.\n", __FUNCTION__);
	CTR2(KTR_IW_CXGB, "%s ep %p", __FUNCTION__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.thread = curthread;
	state_set(&ep->com, LISTEN);
	ep->com.so = cm_id->so;
	err = init_sock(&ep->com);
	err = solisten(ep->com.so, ep->backlog, ep->com.thread);
	cm_id->provider_data = ep;
	/* Error unwind. */
	close_socket(&ep->com, 0);
	cm_id->rem_ref(cm_id);
/*
 * Tear down a listening endpoint: mark it DEAD, close the listen socket
 * and drop the CM's cm_id reference.
 */
iwch_destroy_listen(struct iw_cm_id *cm_id)
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);
	CTR2(KTR_IW_CXGB, "%s ep %p", __FUNCTION__, ep);
	state_set(&ep->com, DEAD);
	close_socket(&ep->com, 0);
	cm_id->rem_ref(cm_id);
/*
 * Initiate local disconnect of an endpoint.  Under ep->com.lock the
 * state machine chooses abortive (ABORTING) or graceful (CLOSING /
 * MORIBUND) teardown based on 'abrupt' and the current state; after
 * dropping the lock, either abort_connection() or a half-close via
 * shutdown_socket() is performed.  'flags' is an M_* wait flag.
 */
iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, int flags)
	mtx_lock(&ep->com.lock);
	PANIC_IF(!ep->com.so);
	CTR5(KTR_IW_CXGB, "%s ep %p so %p state %s, abrupt %d", __FUNCTION__, ep,
	    ep->com.so, states[ep->com.state], abrupt);
	switch (ep->com.state) {
	ep->com.state = ABORTING;
	ep->com.state = CLOSING;
	ep->com.state = ABORTING;
	ep->com.state = MORIBUND;
	/* Teardown already in progress — nothing to do. */
	CTR3(KTR_IW_CXGB, "%s ignoring disconnect ep %p state %u\n",
	    __func__, ep, ep->com.state);
	panic("unknown state: %d\n", ep->com.state);
	mtx_unlock(&ep->com.lock);
	abort_connection(ep);
	__state_set(&ep->com, MORIBUND);
	/* NOTE(review): __state_set is called here without ep->com.lock
	 * (unlocked above) — confirm this is safe on this path. */
	shutdown_socket(&ep->com);
/*
 * Dispatch newly arrived socket data by endpoint state: continue the MPA
 * reply (active) or request (passive) parsing; for the passive side,
 * first record local/remote addresses off the freshly accepted socket.
 * Streaming data in any other state is unexpected and logged.
 */
process_data(struct iwch_ep *ep)
	struct sockaddr_in *local, *remote;
	CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
	switch (state_read(&ep->com)) {
	process_mpa_reply(ep);
	 * Set local and remote addrs here because when we
	 * dequeue the newly accepted socket, they aren't set
	in_getsockaddr(ep->com.so, (struct sockaddr **)&local);
	in_getpeeraddr(ep->com.so, (struct sockaddr **)&remote);
	/* NOTE(review): inet_ntoa() uses a single static buffer, so both
	 * %s arguments in this CTR3 may render the same address. */
	CTR3(KTR_IW_CXGB, "%s local %s remote %s", __FUNCTION__,
	    inet_ntoa(local->sin_addr),
	    inet_ntoa(remote->sin_addr));
	ep->com.local_addr = *local;
	ep->com.remote_addr = *remote;
	/* in_getsockaddr/in_getpeeraddr allocate from M_SONAME. */
	free(local, M_SONAME);
	free(remote, M_SONAME);
	process_mpa_request(ep);
	if (ep->com.so->so_rcv.sb_cc)
		printf("%s Unexpected streaming data."
		    " ep %p state %d so %p so_state %x so_rcv.sb_cc %u so_rcv.sb_mb %p\n",
		    __FUNCTION__, ep, state_read(&ep->com), ep->com.so, ep->com.so->so_state,
		    ep->com.so->so_rcv.sb_cc, ep->com.so->so_rcv.sb_mb);
/*
 * Active-open completion handler.  If the socket reached ISCONNECTED
 * without error, the connection proceeds (success branch not visible in
 * this view — presumably sends the MPA request; confirm against the
 * full file).  Otherwise report the failure to the CM via
 * connect_reply_upcall() with the negated socket error, close the
 * socket, and mark the ep DEAD.
 */
1529 process_connected(struct iwch_ep *ep)
1531 CTR4(KTR_IW_CXGB, "%s ep %p so %p state %s", __FUNCTION__, ep, ep->com.so, states[ep->com.state]);
1532 if ((ep->com.so->so_state & SS_ISCONNECTED) && !ep->com.so->so_error) {
/* Failure path: -so_error follows the kernel's negative-errno upcall
 * convention. */
1535 connect_reply_upcall(ep, -ep->com.so->so_error);
1536 close_socket(&ep->com, 0);
1537 state_set(&ep->com, DEAD);
/*
 * Pull the first completed connection off the listen socket's so_comp
 * queue, install the iw_cxgb receive upcall pointing at the child ep,
 * mark the socket non-blocking, and soaccept() it to obtain the peer
 * address.  The caller owns *remote and must free it with M_SONAME.
 * Returns the dequeued socket (NULL handling not visible in this view).
 */
1542 static struct socket *
1543 dequeue_socket(struct socket *head, struct sockaddr_in **remote, struct iwch_ep *child_ep)
1548 so = TAILQ_FIRST(&head->so_comp);
1553 TAILQ_REMOVE(&head->so_comp, so, so_list);
/* Socket is no longer on the completed-connection queue. */
1556 so->so_qstate &= ~SQ_COMP;
/* Route future receive events for this socket to iwch_so_upcall with
 * the child ep as its argument. */
1559 soupcall_set(so, SO_RCV, iwch_so_upcall, child_ep);
1560 so->so_state |= SS_NBIO;
/* An entry on so_comp must be fully connected and error-free. */
1561 PANIC_IF(!(so->so_state & SS_ISCONNECTED));
1562 PANIC_IF(so->so_error);
1565 soaccept(so, (struct sockaddr **)remote);
/*
 * Accept a new passive connection on a listening ep: allocate a child
 * ep, dequeue the accepted socket, copy local/remote addresses into the
 * child, link it to its parent (taking a parent reference), start the
 * MPA-request timer, and process any MPA request already queued on the
 * socket.
 */
1570 process_newconn(struct iwch_ep *parent_ep)
1572 struct socket *child_so;
1573 struct iwch_ep *child_ep;
1574 struct sockaddr_in *remote;
1576 CTR3(KTR_IW_CXGB, "%s parent ep %p so %p", __FUNCTION__, parent_ep, parent_ep->com.so);
1577 child_ep = alloc_ep(sizeof(*child_ep), M_NOWAIT);
1579 log(LOG_ERR, "%s - failed to allocate ep entry!\n",
/* dequeue_socket() also installs the receive upcall for child_ep and
 * returns the peer address in `remote` (M_SONAME, freed below). */
1583 child_so = dequeue_socket(parent_ep->com.so, &remote, child_ep);
1585 log(LOG_ERR, "%s - failed to dequeue child socket!\n",
1587 __free_ep(&child_ep->com);
1590 CTR3(KTR_IW_CXGB, "%s remote addr %s port %d", __FUNCTION__,
1591 inet_ntoa(remote->sin_addr), ntohs(remote->sin_port));
/* Local address is inherited field-by-field from the listening parent;
 * remote address comes from the accepted socket. */
1592 child_ep->com.tdev = parent_ep->com.tdev;
1593 child_ep->com.local_addr.sin_family = parent_ep->com.local_addr.sin_family;
1594 child_ep->com.local_addr.sin_port = parent_ep->com.local_addr.sin_port;
1595 child_ep->com.local_addr.sin_addr.s_addr = parent_ep->com.local_addr.sin_addr.s_addr;
1596 child_ep->com.local_addr.sin_len = parent_ep->com.local_addr.sin_len;
1597 child_ep->com.remote_addr.sin_family = remote->sin_family;
1598 child_ep->com.remote_addr.sin_port = remote->sin_port;
1599 child_ep->com.remote_addr.sin_addr.s_addr = remote->sin_addr.s_addr;
1600 child_ep->com.remote_addr.sin_len = remote->sin_len;
1601 child_ep->com.so = child_so;
/* cm_id stays NULL until the application accepts the connection. */
1602 child_ep->com.cm_id = NULL;
1603 child_ep->com.thread = parent_ep->com.thread;
1604 child_ep->parent_ep = parent_ep;
1606 free(remote, M_SONAME);
/* Child holds a reference on its parent ep. */
1607 get_ep(&parent_ep->com);
/* NOTE(review): duplicate of the assignment at line 1604 above —
 * harmless, but one of the two should be removed. */
1608 child_ep->parent_ep = parent_ep;
1609 callout_init(&child_ep->timer, TRUE);
1610 state_set(&child_ep->com, MPA_REQ_WAIT);
1611 start_ep_timer(child_ep);
1613 /* maybe the request has already been queued up on the socket... */
1614 process_mpa_request(child_ep);
/*
 * Socket receive upcall.  Runs in socket/upcall context where real work
 * must not be done, so it defers: queue the ep's common part on
 * req_list (a NULL tqe_prev means it is not already queued) and kick
 * the iw_cxgb taskqueue, which runs process_req().
 */
1618 iwch_so_upcall(struct socket *so, void *arg, int waitflag)
1620 struct iwch_ep *ep = arg;
1622 CTR6(KTR_IW_CXGB, "%s so %p so state %x ep %p ep state(%d)=%s", __FUNCTION__, so, so->so_state, ep, ep->com.state, states[ep->com.state]);
1623 mtx_lock(&req_lock);
/* tqe_prev != NULL ⇒ already on req_list; avoid double-queueing. */
1624 if (ep && ep->com.so && !ep->com.entry.tqe_prev) {
1626 TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
1627 taskqueue_enqueue(iw_cxgb_taskq, &iw_cxgb_task);
1629 mtx_unlock(&req_lock);
/*
 * Dispatch a deferred socket event for an ep based on its CM state and
 * the socket's flags: connect completion, new passive connection,
 * connection error, peer close (read side shut while still open), or
 * full disconnect.
 */
1634 process_socket_event(struct iwch_ep *ep)
1636 int state = state_read(&ep->com);
1637 struct socket *so = ep->com.so;
1639 CTR6(KTR_IW_CXGB, "%s so %p so state %x ep %p ep state(%d)=%s", __FUNCTION__, so, so->so_state, ep, ep->com.state, states[ep->com.state]);
1640 if (state == CONNECTING) {
1641 process_connected(ep);
1645 if (state == LISTEN) {
1646 process_newconn(ep);
1650 /* connection error */
1652 process_conn_error(ep);
/* Peer closed its send side before we reached a closing state. */
1657 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state < CLOSING) {
1658 process_peer_close(ep);
1662 /* close complete */
1663 if (so->so_state & (SS_ISDISCONNECTED)) {
1664 process_close_complete(ep);
/*
 * Taskqueue handler: drain req_list, calling process_socket_event() for
 * each queued ep.  req_lock is dropped around the per-ep work so the
 * upcall can keep queueing; tqe_prev is cleared on removal so
 * iwch_so_upcall() can detect that the ep may be re-queued.
 */
1674 process_req(void *ctx, int pending)
1676 struct iwch_ep_common *epc;
1678 CTR1(KTR_IW_CXGB, "%s enter", __FUNCTION__);
1679 mtx_lock(&req_lock);
1680 while (!TAILQ_EMPTY(&req_list)) {
1681 epc = TAILQ_FIRST(&req_list);
1682 TAILQ_REMOVE(&req_list, epc, entry);
/* Mark "not queued" — the upcall tests tqe_prev before inserting. */
1683 epc->entry.tqe_prev = NULL;
/* Drop the lock while processing: handlers may sleep or re-queue. */
1684 mtx_unlock(&req_lock);
1686 process_socket_event((struct iwch_ep *)epc);
1688 mtx_lock(&req_lock);
1690 mtx_unlock(&req_lock);
/* Module init (function header not visible in this view): set up the
 * deferred-work machinery — the request list, its mutex, and a
 * single-threaded taskqueue whose task runs process_req(). */
1696 TAILQ_INIT(&req_list);
1697 mtx_init(&req_lock, "iw_cxgb req_list lock", NULL, MTX_DEF);
1698 iw_cxgb_taskq = taskqueue_create("iw_cxgb_taskq", M_NOWAIT,
1699 taskqueue_thread_enqueue, &iw_cxgb_taskq);
1700 if (iw_cxgb_taskq == NULL) {
1701 printf("failed to allocate iw_cxgb taskqueue\n");
1704 taskqueue_start_threads(&iw_cxgb_taskq, 1, PI_NET, "iw_cxgb taskq");
1705 TASK_INIT(&iw_cxgb_task, 0, process_req, NULL);
/* Module teardown (function header not visible in this view): wait for
 * any in-flight task to finish, then destroy the taskqueue. */
1713 taskqueue_drain(iw_cxgb_taskq, &iw_cxgb_task);
1714 taskqueue_free(iw_cxgb_taskq);
/*
 * Register the CM's CPL message handlers with the T3 adapter:
 * RDMA terminate and EC status notifications.
 */
1718 iwch_cm_init_cpl(struct adapter *sc)
1721 t3_register_cpl_handler(sc, CPL_RDMA_TERMINATE, terminate);
1722 t3_register_cpl_handler(sc, CPL_RDMA_EC_STATUS, ec_status);
/*
 * Unregister the CM's CPL handlers (mirror of iwch_cm_init_cpl) by
 * installing NULL for each opcode.
 */
1726 iwch_cm_term_cpl(struct adapter *sc)
1729 t3_register_cpl_handler(sc, CPL_RDMA_TERMINATE, NULL);
1730 t3_register_cpl_handler(sc, CPL_RDMA_EC_STATUS, NULL);