/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/neighbour.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

#include <linux/types.h>

#include "tom/t4_tom.h"

#define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
static spinlock_t req_lock;
static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
static struct work_struct c4iw_task;
static struct workqueue_struct *c4iw_taskq;
static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
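
/*
 * Socket upcalls cannot sleep, so c4iw_so_upcall() only queues the affected
 * endpoint on req_list and kicks c4iw_taskq; process_req() later dequeues
 * each endpoint and runs process_socket_event() in a context where it is
 * safe to block.  timeout_list/timeout_lock play the same role for endpoint
 * timers (see ep_timeout()).
 */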
static void process_req(struct work_struct *ctx);
static void start_ep_timer(struct c4iw_ep *ep);
static void stop_ep_timer(struct c4iw_ep *ep);
static int set_tcpinfo(struct c4iw_ep *ep);
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc);
static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void *alloc_ep(int size, gfp_t flags);
void __free_ep(struct c4iw_ep_common *epc);
static struct rtentry *find_route(__be32 local_ip, __be32 peer_ip,
    __be16 local_port, __be16 peer_port, u8 tos);
static int close_socket(struct c4iw_ep_common *epc, int close);
static int shutdown_socket(struct c4iw_ep_common *epc);
static void abort_socket(struct c4iw_ep *ep);
static void send_mpa_req(struct c4iw_ep *ep);
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
static void close_complete_upcall(struct c4iw_ep *ep);
static int abort_connection(struct c4iw_ep *ep);
static void peer_close_upcall(struct c4iw_ep *ep);
static void peer_abort_upcall(struct c4iw_ep *ep);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static void connect_request_upcall(struct c4iw_ep *ep);
static void established_upcall(struct c4iw_ep *ep);
static void process_mpa_reply(struct c4iw_ep *ep);
static void process_mpa_request(struct c4iw_ep *ep);
static void process_peer_close(struct c4iw_ep *ep);
static void process_conn_error(struct c4iw_ep *ep);
static void process_close_complete(struct c4iw_ep *ep);
static void ep_timeout(unsigned long arg);
static void init_sock(struct c4iw_ep_common *epc);
static void process_data(struct c4iw_ep *ep);
static void process_connected(struct c4iw_ep *ep);
static struct socket *dequeue_socket(struct socket *head,
    struct sockaddr_in **remote, struct c4iw_ep *child_ep);
static void process_newconn(struct c4iw_ep *parent_ep);
static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
static void process_socket_event(struct c4iw_ep *ep);
static void release_ep_resources(struct c4iw_ep *ep);
#define START_EP_TIMER(ep) \
do { \
	CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
	    __func__, __LINE__, (ep)); \
	start_ep_timer(ep); \
} while (0)

#define STOP_EP_TIMER(ep) \
do { \
	CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
	    __func__, __LINE__, (ep)); \
	stop_ep_timer(ep); \
} while (0)
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_req_wait",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
static void
process_req(struct work_struct *ctx)
{
	struct c4iw_ep_common *epc;

	spin_lock(&req_lock);
	while (!TAILQ_EMPTY(&req_list)) {
		epc = TAILQ_FIRST(&req_list);
		TAILQ_REMOVE(&req_list, epc, entry);
		epc->entry.tqe_prev = NULL;
		spin_unlock(&req_lock);

		process_socket_event((struct c4iw_ep *)epc);
		c4iw_put_ep(epc);	/* ref taken when queued by the upcall */

		spin_lock(&req_lock);
	}
	spin_unlock(&req_lock);
}
/*
 * XXX: doesn't belong here in the iWARP driver.
 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
 * set.  Is this a valid assumption for active open?
 */
static int
set_tcpinfo(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct toepcb *toep;
	int rc = 0;

	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if ((tp->t_flags & TF_TOE) == 0) {
		rc = EINVAL;
		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
		    __func__, so, ep);
		goto done;
	}
	toep = TOEPCB(so);

	ep->hwtid = toep->tid;
	ep->snd_seq = tp->snd_nxt;
	ep->rcv_seq = tp->rcv_nxt;
	ep->emss = max(tp->t_maxseg, 128);
done:
	INP_WUNLOCK(inp);
	return (rc);
}
static struct rtentry *
find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
    __be16 peer_port, u8 tos)
{
	struct route iproute;
	struct sockaddr_in *dst = (struct sockaddr_in *)&iproute.ro_dst;

	CTR5(KTR_IW_CXGBE, "%s:frtB %x, %x, %d, %d", __func__, local_ip,
	    peer_ip, ntohs(local_port), ntohs(peer_port));
	bzero(&iproute, sizeof iproute);
	dst->sin_family = AF_INET;
	dst->sin_len = sizeof *dst;
	dst->sin_addr.s_addr = peer_ip;

	rtalloc(&iproute);
	CTR2(KTR_IW_CXGBE, "%s:frtE %p", __func__, (uint64_t)iproute.ro_rt);
	return iproute.ro_rt;
}
static int
close_socket(struct c4iw_ep_common *epc, int close)
{
	struct socket *so = epc->so;
	int rc;

	CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, so, epc,
	    states[epc->state]);

	soupcall_clear(so, SO_RCV);

	if (close)
		rc = soclose(so);
	else
		rc = soshutdown(so, SHUT_RDWR);
	epc->so = NULL;

	return (rc);
}
static int
shutdown_socket(struct c4iw_ep_common *epc)
{

	CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, epc->so,
	    epc, states[epc->state]);

	return (soshutdown(epc->so, SHUT_WR));
}
static void
abort_socket(struct c4iw_ep *ep)
{
	struct sockopt sopt;
	int rc;
	struct linger l;

	CTR4(KTR_IW_CXGBE, "%s ep %p so %p state %s", __func__, ep, ep->com.so,
	    states[ep->com.state]);

	l.l_onoff = 1;
	l.l_linger = 0;

	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	sopt.sopt_td = NULL;
	rc = sosetopt(ep->com.so, &sopt);
	if (rc != 0) {
		log(LOG_ERR, "%s: can't set linger to 0, no RST! err %d\n",
		    __func__, rc);
	}
}
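
/*
 * Called (via the taskqueue) when the peer half-closes the connection:
 * process_socket_event() routes us here once SBS_CANTRCVMORE is set on the
 * receive buffer.  The switch below walks the ep through the normal
 * shutdown ladder (CLOSING -> MORIBUND -> DEAD), pushing the QP to
 * CLOSING/IDLE and delivering the matching iw_cm upcalls along the way.
 */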
static void
process_peer_close(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT CLOSING",
		    __func__, ep);
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT CLOSING",
		    __func__, ep);
		__state_set(&ep->com, DEAD);
		connect_reply_upcall(ep, -ECONNABORTED);

		disconnect = 0;
		STOP_EP_TIMER(ep);
		close_socket(&ep->com, 0);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		release = 1;
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
		    __func__, ep);
		__state_set(&ep->com, CLOSING);
		c4iw_get_ep(&ep->com);
		break;
	case MPA_REP_SENT:
		CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
		    __func__, ep);
		__state_set(&ep->com, CLOSING);
		break;
	case FPDU_MODE:
		CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
		    __func__, ep);
		START_EP_TIMER(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		c4iw_modify_qp(ep->com.dev, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
		    __func__, ep);
		disconnect = 0;
		break;
	case CLOSING:
		CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
		    __func__, ep);
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
		    ep);
		STOP_EP_TIMER(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_socket(&ep->com, 0);
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
		    __func__, ep);
		disconnect = 0;
		break;
	default:
		panic("%s: ep %p state %d", __func__, ep,
		    ep->com.state);
		break;
	}

	mutex_unlock(&ep->com.mutex);

	if (disconnect) {
		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
	}
	if (release) {
		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
}
static void
process_conn_error(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int ret;
	int state;

	state = state_read(&ep->com);
	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
	    __func__, ep, ep->com.so, ep->com.so->so_error,
	    states[ep->com.state]);

	switch (state) {
	case MPA_REQ_WAIT:
		STOP_EP_TIMER(ep);
		break;
	case MPA_REQ_SENT:
		STOP_EP_TIMER(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_err = ECONNRESET;
		CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		c4iw_get_ep(&ep->com);
		break;
	case MORIBUND:
	case CLOSING:
		STOP_EP_TIMER(ep);
		/* FALLTHROUGH */
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
			    ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
			    &attrs, 1);
			if (ret)
				log(LOG_ERR,
				    "%s - qp <- error failed!\n",
				    __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
		    __func__, ep->com.so->so_error);
		return;
	default:
		panic("%s: ep %p state %d", __func__, ep, state);
		break;
	}

	if (state != ABORTING) {

		CTR2(KTR_IW_CXGBE, "%s:pce1 %p", __func__, ep);
		close_socket(&ep->com, 0);
		state_set(&ep->com, DEAD);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
}
static void
process_close_complete(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);

	switch (ep->com.state) {
	case CLOSING:
		CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
		    __func__, ep);
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
		    ep);
		STOP_EP_TIMER(ep);

		if ((ep->com.cm_id) && (ep->com.qp)) {

			CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
			    __func__, ep);
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.dev,
			    ep->com.qp,
			    C4IW_QP_ATTR_NEXT_STATE,
			    &attrs, 1);
		}

		if (ep->parent_ep) {

			CTR2(KTR_IW_CXGBE, "%s:pcc3 %p", __func__, ep);
			close_socket(&ep->com, 1);
		} else {

			CTR2(KTR_IW_CXGBE, "%s:pcc4 %p", __func__, ep);
			close_socket(&ep->com, 0);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
		CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
		break;
	case DEAD:
	default:
		CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
		panic("%s:pcc6 %p DEAD", __func__, ep);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:pcc7 %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
}
static void
init_sock(struct c4iw_ep_common *epc)
{
	int rc;
	struct sockopt sopt;
	struct socket *so = epc->so;
	int on = 1;

	soupcall_set(so, SO_RCV, c4iw_so_upcall, epc);
	so->so_state |= SS_NBIO;

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;
	sopt.sopt_td = NULL;
	rc = sosetopt(so, &sopt);
	if (rc != 0) {
		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
		    __func__, so, rc);
	}
}
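
/*
 * First stop for incoming bytes on a connection: depending on where the
 * endpoint is in its life cycle, the data is either the tail of an MPA
 * reply (active side), a freshly arrived MPA request (passive side), or
 * unexpected streaming data, which is only logged.
 */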
static void
process_data(struct c4iw_ep *ep)
{
	struct sockaddr_in *local, *remote;

	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sb_cc %d", __func__,
	    ep->com.so, ep, states[ep->com.state], ep->com.so->so_rcv.sb_cc);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep);
		break;
	case MPA_REQ_WAIT:
		in_getsockaddr(ep->com.so, (struct sockaddr **)&local);
		in_getpeeraddr(ep->com.so, (struct sockaddr **)&remote);
		ep->com.local_addr = *local;
		ep->com.remote_addr = *remote;
		free(local, M_SONAME);
		free(remote, M_SONAME);
		process_mpa_request(ep);
		break;
	default:
		if (ep->com.so->so_rcv.sb_cc)
			log(LOG_ERR, "%s: Unexpected streaming data. "
			    "ep %p, state %d, so %p, so_state 0x%x, sb_cc %u\n",
			    __func__, ep, state_read(&ep->com), ep->com.so,
			    ep->com.so->so_state, ep->com.so->so_rcv.sb_cc);
		break;
	}
}
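
/*
 * Active-open completion, run from the taskqueue once the socket leaves the
 * CONNECTING state: on success the MPA exchange starts with send_mpa_req(),
 * otherwise the failure is reported upstream and the endpoint is torn down.
 */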
static void
process_connected(struct c4iw_ep *ep)
{

	if ((ep->com.so->so_state & SS_ISCONNECTED) && !ep->com.so->so_error)
		send_mpa_req(ep);
	else {
		connect_reply_upcall(ep, -ep->com.so->so_error);
		close_socket(&ep->com, 0);
		state_set(&ep->com, DEAD);
		c4iw_put_ep(&ep->com);
	}
}
static struct socket *
dequeue_socket(struct socket *head, struct sockaddr_in **remote,
    struct c4iw_ep *child_ep)
{
	struct socket *so;

	ACCEPT_LOCK();
	so = TAILQ_FIRST(&head->so_comp);
	if (so == NULL) {
		ACCEPT_UNLOCK();
		return (NULL);
	}
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;
	soupcall_set(so, SO_RCV, c4iw_so_upcall, child_ep);
	so->so_state |= SS_NBIO;
	ACCEPT_UNLOCK();

	soaccept(so, (struct sockaddr **)remote);

	return (so);
}
static void
process_newconn(struct c4iw_ep *parent_ep)
{
	struct socket *child_so;
	struct c4iw_ep *child_ep;
	struct sockaddr_in *remote;

	child_ep = alloc_ep(sizeof(*child_ep), M_NOWAIT);
	if (child_ep == NULL) {
		CTR3(KTR_IW_CXGBE, "%s: parent so %p, parent ep %p, ENOMEM",
		    __func__, parent_ep->com.so, parent_ep);
		log(LOG_ERR, "%s: failed to allocate ep entry\n", __func__);
		return;
	}

	child_so = dequeue_socket(parent_ep->com.so, &remote, child_ep);
	if (child_so == NULL) {
		CTR4(KTR_IW_CXGBE,
		    "%s: parent so %p, parent ep %p, child ep %p, dequeue err",
		    __func__, parent_ep->com.so, parent_ep, child_ep);
		log(LOG_ERR, "%s: failed to dequeue child socket\n", __func__);
		__free_ep(&child_ep->com);
		return;
	}

	CTR5(KTR_IW_CXGBE,
	    "%s: parent so %p, parent ep %p, child so %p, child ep %p",
	    __func__, parent_ep->com.so, parent_ep, child_so, child_ep);

	child_ep->com.local_addr = parent_ep->com.local_addr;
	child_ep->com.remote_addr = *remote;
	child_ep->com.dev = parent_ep->com.dev;
	child_ep->com.so = child_so;
	child_ep->com.cm_id = NULL;
	child_ep->com.thread = parent_ep->com.thread;
	child_ep->parent_ep = parent_ep;

	free(remote, M_SONAME);
	c4iw_get_ep(&parent_ep->com);
	init_timer(&child_ep->timer);
	state_set(&child_ep->com, MPA_REQ_WAIT);
	START_EP_TIMER(child_ep);

	/* maybe the request has already been queued up on the socket... */
	process_mpa_request(child_ep);
}
static int
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
	struct c4iw_ep *ep = arg;

	spin_lock(&req_lock);
	CTR6(KTR_IW_CXGBE,
	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
	    __func__, so, so->so_state, ep, states[ep->com.state],
	    ep->com.entry.tqe_prev);

	if (ep && ep->com.so && !ep->com.entry.tqe_prev) {
		KASSERT(ep->com.so == so, ("%s: XXX review.", __func__));
		c4iw_get_ep(&ep->com);
		TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
		queue_work(c4iw_taskq, &c4iw_task);
	}
	spin_unlock(&req_lock);

	return (SU_OK);
}
static void
process_socket_event(struct c4iw_ep *ep)
{
	int state = state_read(&ep->com);
	struct socket *so = ep->com.so;

	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
	    so->so_error, so->so_rcv.sb_state, ep, states[state]);

	if (state == CONNECTING) {
		process_connected(ep);
		return;
	}

	if (state == LISTEN) {
		process_newconn(ep);
		return;
	}

	/* connection error */
	if (so->so_error) {
		process_conn_error(ep);
		return;
	}

	/* peer close */
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state < CLOSING) {
		process_peer_close(ep);
		return;
	}

	/* close complete */
	if (so->so_state & SS_ISDISCONNECTED) {
		process_close_complete(ep);
		return;
	}

	/* rx data */
	process_data(ep);
}
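
/*
 * Note the dispatch order above: connect/accept transitions are handled
 * first, then hard errors, then the peer's FIN, then our own close
 * completion; only a socket with none of those pending is treated as
 * carrying MPA payload.
 */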
SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");

int db_delay_usecs = 1;
TUNABLE_INT("hw.iw_cxgbe.db_delay_usecs", &db_delay_usecs);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_delay_usecs, CTLFLAG_RW, &db_delay_usecs, 0,
    "Usecs to delay awaiting db fifo to drain");

static int dack_mode = 1;
TUNABLE_INT("hw.iw_cxgbe.dack_mode", &dack_mode);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RW, &dack_mode, 0,
    "Delayed ack mode (default = 1)");

int c4iw_max_read_depth = 8;
TUNABLE_INT("hw.iw_cxgbe.c4iw_max_read_depth", &c4iw_max_read_depth);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RW, &c4iw_max_read_depth, 0,
    "Per-connection max ORD/IRD (default = 8)");

static int enable_tcp_timestamps;
TUNABLE_INT("hw.iw_cxgbe.enable_tcp_timestamps", &enable_tcp_timestamps);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RW, &enable_tcp_timestamps, 0,
    "Enable tcp timestamps (default = 0)");

static int enable_tcp_sack;
TUNABLE_INT("hw.iw_cxgbe.enable_tcp_sack", &enable_tcp_sack);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RW, &enable_tcp_sack, 0,
    "Enable tcp SACK (default = 0)");

static int enable_tcp_window_scaling = 1;
TUNABLE_INT("hw.iw_cxgbe.enable_tcp_window_scaling", &enable_tcp_window_scaling);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RW, &enable_tcp_window_scaling, 0,
    "Enable tcp window scaling (default = 1)");
int c4iw_debug = 0;
TUNABLE_INT("hw.iw_cxgbe.c4iw_debug", &c4iw_debug);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RW, &c4iw_debug, 0,
    "Enable debug logging (default = 0)");
static int peer2peer;
TUNABLE_INT("hw.iw_cxgbe.peer2peer", &peer2peer);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RW, &peer2peer, 0,
    "Support peer2peer ULPs (default = 0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
TUNABLE_INT("hw.iw_cxgbe.p2p_type", &p2p_type);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RW, &p2p_type, 0,
    "RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
TUNABLE_INT("hw.iw_cxgbe.ep_timeout_secs", &ep_timeout_secs);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RW, &ep_timeout_secs, 0,
    "CM Endpoint operation timeout in seconds (default = 60)");
static int mpa_rev = 1;
TUNABLE_INT("hw.iw_cxgbe.mpa_rev", &mpa_rev);
#ifdef IW_CM_MPAV2
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
    "MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
#else
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
    "MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant (default = 1)");
#endif
static int markers_enabled;
TUNABLE_INT("hw.iw_cxgbe.markers_enabled", &markers_enabled);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RW, &markers_enabled, 0,
    "Enable MPA MARKERS (default(0) = disabled)");

static int crc_enabled = 1;
TUNABLE_INT("hw.iw_cxgbe.crc_enabled", &crc_enabled);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RW, &crc_enabled, 0,
    "Enable MPA CRC (default(1) = enabled)");

static int rcv_win = 256 * 1024;
TUNABLE_INT("hw.iw_cxgbe.rcv_win", &rcv_win);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RW, &rcv_win, 0,
    "TCP receive window in bytes (default = 256KB)");

static int snd_win = 128 * 1024;
TUNABLE_INT("hw.iw_cxgbe.snd_win", &snd_win);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RW, &snd_win, 0,
    "TCP send window in bytes (default = 128KB)");
int db_fc_threshold = 2000;
TUNABLE_INT("hw.iw_cxgbe.db_fc_threshold", &db_fc_threshold);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_fc_threshold, CTLFLAG_RW, &db_fc_threshold, 0,
    "QP count/threshold that triggers automatic db flow control mode (default = 2000)");
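
/*
 * All of the knobs above are loader tunables with matching sysctl handles.
 * For example (illustrative values, not the defaults):
 *
 *   # /boot/loader.conf
 *   hw.iw_cxgbe.mpa_rev=2
 *   hw.iw_cxgbe.rcv_win=524288
 *
 *   # at runtime
 *   sysctl hw.iw_cxgbe.c4iw_debug=1
 */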
static void
start_ep_timer(struct c4iw_ep *ep)
{

	if (timer_pending(&ep->timer)) {
		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
		    ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}
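
/*
 * The timer owns a reference on the ep (taken in start_ep_timer()).  The
 * TIMEOUT flag arbitrates who drops it: stop_ep_timer() puts the ref only
 * if it wins the test_and_set_bit() race, otherwise ep_timeout() already
 * fired and the reference is released on the timeout path instead.
 */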
static void
stop_ep_timer(struct c4iw_ep *ep)
{

	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
	}
}
static enum c4iw_ep_state
state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);

	return (state);
}
static void
__state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}
static void
state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{

	mutex_lock(&epc->mutex);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}
static void *
alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc == NULL)
		return (NULL);

	kref_init(&epc->kref);
	mutex_init(&epc->mutex);
	c4iw_init_wr_wait(&epc->wr_wait);

	return (epc);
}
void __free_ep(struct c4iw_ep_common *epc)
{

	CTR2(KTR_IW_CXGBE, "%s:feB %p", __func__, epc);
	KASSERT(!epc->so, ("%s warning ep->so %p\n", __func__, epc->so));
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list!\n",
	    __func__, epc));
	kfree(epc);
	CTR2(KTR_IW_CXGBE, "%s:feE %p", __func__, epc);
}
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;
	struct c4iw_ep_common *epc;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	epc = &ep->com;
	KASSERT(!epc->so, ("%s ep->so %p", __func__, epc->so));
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
	    __func__, epc));
	kfree(ep);
}
static void release_ep_resources(struct c4iw_ep *ep)
{

	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
}
static void
send_mpa_req(struct c4iw_ep *ep)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	char mpa_rev_to_use = mpa_rev;
	int err;

	if (ep->retry_with_mpa_v1)
		mpa_rev_to_use = 1;
	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);

	if (mpalen > MHLEN)
		CXGBE_UNIMPLEMENTED(__func__);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}

	mpa = mtod(m, struct mpa_message *);
	m->m_len = mpalen;
	m->m_pkthdr.len = mpalen;
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0) |
	    (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;

	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size +=
		    htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_READ_RTR);
			}
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (ep->plen) {
			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params),
			    ep->mpa_pkt + sizeof(*mpa), ep->plen);
		}
	} else {

		if (ep->plen)
			memcpy(mpa->private_data,
			    ep->mpa_pkt + sizeof(*mpa), ep->plen);
		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
	}

	err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
	    ep->com.thread);
	if (err) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}

	START_EP_TIMER(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
}
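
/*
 * Wire format note: an enhanced (v2) MPA frame is the fixed mpa_message
 * header, then struct mpa_v2_conn_params (IRD/ORD plus the peer-to-peer
 * RTR bits), then any ULP private data.  private_data_size covers both of
 * the latter, which is why the v2 paths above and below add
 * sizeof(struct mpa_v2_conn_params) to it.
 */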
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	int err;

	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
	    plen);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpalen += sizeof(struct mpa_v2_conn_params);
		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
		    ep->mpa_attr.version, mpalen);
	}

	if (mpalen > MHLEN)
		CXGBE_UNIMPLEMENTED(__func__);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {

		printf("%s - cannot alloc mbuf!\n", __func__);
		CTR2(KTR_IW_CXGBE, "%s:smrej2 %p", __func__, ep);
		return (-ENOMEM);
	}

	mpa = mtod(m, struct mpa_message *);
	m->m_len = mpalen;
	m->m_pkthdr.len = mpalen;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
		    htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
		    (peer2peer ? MPA_V2_PEER2PEER_MODEL : 0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
		    (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
		    MPA_V2_RDMA_WRITE_RTR : p2p_type ==
		    FW_RI_INIT_P2PTYPE_READ_REQ ?
		    MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (plen)
			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params), pdata, plen);
		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
	} else if (plen)
		memcpy(mpa->private_data, pdata, plen);

	err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
	    ep->com.thread);
	if (err == 0)
		ep->snd_seq += mpalen;
	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
	return (err);
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mbuf *m;
	struct mpa_v2_conn_params mpa_v2_params;
	int err;

	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
		    ep->mpa_attr.version);
		mpalen += sizeof(struct mpa_v2_conn_params);
	}

	if (mpalen > MHLEN)
		CXGBE_UNIMPLEMENTED(__func__);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {

		CTR2(KTR_IW_CXGBE, "%s:smrep2 %p", __func__, ep);
		printf("%s - cannot alloc mbuf!\n", __func__);
		return (-ENOMEM);
	}

	mpa = mtod(m, struct mpa_message *);
	m->m_len = mpalen;
	m->m_pkthdr.len = mpalen;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
		    htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);

		if (peer2peer && (ep->mpa_attr.p2p_type !=
		    FW_RI_INIT_P2PTYPE_DISABLED)) {

			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {

				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {

				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_READ_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (plen)
			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else if (plen)
		memcpy(mpa->private_data, pdata, plen);

	state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
	    ep->com.thread);
	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
	return (err);
}
static void close_complete_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
}
static int abort_connection(struct c4iw_ep *ep)
{
	int err;

	CTR2(KTR_IW_CXGBE, "%s:abB %p", __func__, ep);
	close_complete_upcall(ep);
	state_set(&ep->com, ABORTING);
	abort_socket(ep);
	err = close_socket(&ep->com, 0);
	set_bit(ABORT_CONN, &ep->com.history);
	CTR2(KTR_IW_CXGBE, "%s:abE %p", __func__, ep);

	return (err);
}
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
}
static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
}
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s:cruB %p %d", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = (status == -ECONNABORTED) ? -ECONNRESET : status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {

		if (!ep->tried_with_mpa_v1) {

			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
			    sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message) +
			    sizeof(struct mpa_v2_conn_params);
		} else {

			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message);
		}
	}

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
		set_bit(CONN_RPL_UPCALL, &ep->com.history);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}

	if (status == -ECONNABORTED) {

		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
		return;
	}

	if (status < 0) {

		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}

	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
}
static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
	    ep->tried_with_mpa_v1);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;
	event.so = ep->com.so;

	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
		    sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
		    sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}

	c4iw_get_ep(&ep->com);
	ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
	    &event);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
}
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
}
static void process_mpa_reply(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	struct mbuf *top, *m;
	int flags = MSG_DONTWAIT;
	struct uio uio;

	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	STOP_EP_TIMER(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	uio.uio_resid = 1000000;
	uio.uio_td = ep->com.thread;
	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
	if (err) {

		if (err == EWOULDBLOCK) {

			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
			START_EP_TIMER(ep);
			return;
		}
		err = -err;
		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
		goto err;
	}

	if (ep->com.so->so_rcv.sb_mb) {

		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
		    __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
	}

	m = top;
	do {

		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
		/*
		 * If we get more than the supported amount of private data
		 * then we must fail this connection.
		 */
		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {

			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
			    ep->mpa_pkt_len + m->m_len);
			err = -EINVAL;
			goto err;
		}

		/*
		 * copy the new data into our accumulation buffer.
		 */
		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
		ep->mpa_pkt_len += m->m_len;
		if (!m->m_next)
			m = m->m_nextpkt;
		else
			m = m->m_next;
	} while (m);

	m_freem(top);

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {

		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
		    mpa->revision, mpa_rev);
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}

	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {

		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {

		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
		return;
	}

	if (mpa->flags & MPA_REJECT) {

		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {

		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
		ep->mpa_attr.enhanced_rdma_conn =
		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;

		if (ep->mpa_attr.enhanced_rdma_conn) {

			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
			mpa_v2_params = (struct mpa_v2_conn_params *)
			    (ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
			    MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
			    MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {

				CTR2(KTR_IW_CXGBE, "%s:pmre %p", __func__, ep);
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
				insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {

				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrg %p",
					    __func__, ep);
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				}
				else if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_READ_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrh %p",
					    __func__, ep);
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_READ_REQ;
				}
			}
		}
	} else {

		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);

		if (mpa->revision == 1) {

			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);

			if (peer2peer) {

				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
				ep->mpa_attr.p2p_type = p2p_type;
			}
		}
	}

	if (set_tcpinfo(ep)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
		printf("%s set_tcpinfo error\n", __func__);
		err = -ECONNRESET;
		goto err;
	}

	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	    ep->mpa_attr.p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
	if (err) {

		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
		goto err;
	}

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {

		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {

		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		    __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
	}
	goto out;
err:
	state_set(&ep->com, ABORTING);
	abort_connection(ep);
out:
	connect_reply_upcall(ep, err);
	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
	return;
}
static void
process_mpa_request(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	u16 plen;
	int flags = MSG_DONTWAIT;
	int rc;
	struct iovec iov;
	struct uio uio;
	enum c4iw_ep_state state = state_read(&ep->com);

	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);

	if (state != MPA_REQ_WAIT)
		return;

	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */

	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
	if (rc == EAGAIN)
		return;
	else if (rc) {

		abort_connection(ep);
		return;
	}
	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
	    __func__, ep->com.so));
	ep->mpa_pkt_len += uio.uio_offset;

	/*
	 * If we get more than the supported amount of private data then we must
	 * fail this connection. XXX: check so_rcv->sb_cc, or peek with another
	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
	 * byte is filled by the soreceive above.
	 */
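
	/*
	 * A minimal sketch of the last idea in the XXX above, assuming
	 * mpa_pkt were grown by one spare guard byte (hypothetical; not
	 * compiled in).  If soreceive() filled the buffer all the way to
	 * the end, the peer may have sent more than an MPA message plus
	 * private data can legally occupy, so give up instead of parsing.
	 */
#if 0
	if (ep->mpa_pkt_len == sizeof(ep->mpa_pkt)) {
		abort_connection(ep);
		return;
	}
#endif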
	/* Don't even have the MPA message.  Wait for more data to arrive. */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
		abort_connection(ep);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep);
		return;
	}

	/*
	 * Fail if there's too much private data.
	 */
	plen = ntohs(mpa->private_data_size);
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			struct mpa_v2_conn_params *mpa_v2_params;
			u16 ird, ord;

			mpa_v2_params = (void *)&ep->mpa_pkt[sizeof(*mpa)];
			ird = ntohs(mpa_v2_params->ird);
			ord = ntohs(mpa_v2_params->ord);

			ep->ird = ird & MPA_V2_IRD_ORD_MASK;
			ep->ord = ord & MPA_V2_IRD_ORD_MASK;
			if (ird & MPA_V2_PEER2PEER_MODEL && peer2peer) {
				if (ord & MPA_V2_RDMA_WRITE_RTR) {
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				} else if (ord & MPA_V2_RDMA_READ_RTR) {
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_READ_REQ;
				}
			}
		}
	} else if (mpa->revision == 1 && peer2peer)
		ep->mpa_attr.p2p_type = p2p_type;

	if (set_tcpinfo(ep)) {
		printf("%s set_tcpinfo error\n", __func__);
		abort_connection(ep);
		return;
	}

	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);
	STOP_EP_TIMER(ep);

	/* drive upcall */
	mutex_lock(&ep->parent_ep->com.mutex);
	if (ep->parent_ep->com.state != DEAD)
		connect_request_upcall(ep);
	else
		abort_connection(ep);
	mutex_unlock(&ep->parent_ep->com.mutex);
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us its just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct c4iw_ep *ep = to_ep(cm_id);

	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);

	if (state_read(&ep->com) == DEAD) {

		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);

	if (mpa_rev == 0) {

		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
		abort_connection(ep);
	} else {

		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = soshutdown(ep->com.so, SHUT_RDWR);
	}
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:crc4 %p", __func__, ep);
	return 0;
}
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);

	if (state_read(&ep->com) == DEAD) {

		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {

		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
		abort_connection(ep);
		err = -EINVAL;
		goto err;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);

		if (conn_param->ord > ep->ird) {

			CTR2(KTR_IW_CXGBE, "%s:cac4 %p", __func__, ep);
			ep->ird = conn_param->ird;
			ep->ord = conn_param->ord;
			send_mpa_reject(ep, conn_param->private_data,
			    conn_param->private_data_len);
			abort_connection(ep);
			err = -ENOMEM;
			goto err;
		}

		if (conn_param->ird > ep->ord) {

			CTR2(KTR_IW_CXGBE, "%s:cac5 %p", __func__, ep);

			if (!ep->ord) {

				CTR2(KTR_IW_CXGBE, "%s:cac6 %p", __func__, ep);
				conn_param->ird = 1;
			} else {

				CTR2(KTR_IW_CXGBE, "%s:cac7 %p", __func__, ep);
				abort_connection(ep);
				err = -ENOMEM;
				goto err;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version != 2) {

		CTR2(KTR_IW_CXGBE, "%s:cac8 %p", __func__, ep);

		if (peer2peer && ep->ird == 0) {

			CTR2(KTR_IW_CXGBE, "%s:cac9 %p", __func__, ep);
			ep->ird = 1;
		}
	}

	ep->com.qp = qp;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;

	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	    C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD |
	    C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
	if (err) {

		CTR2(KTR_IW_CXGBE, "%s:caca %p", __func__, ep);
		goto err1;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
	    conn_param->private_data_len);
	if (err) {

		CTR2(KTR_IW_CXGBE, "%s:cacb %p", __func__, ep);
		goto err1;
	}

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
	return err;
}
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep = NULL;
	struct rtentry *rt = NULL;
	struct toedev *tdev;

	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {

		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), M_NOWAIT);
	if (ep == NULL) {

		CTR2(KTR_IW_CXGBE, "%s:cc2 %p", __func__, cm_id);
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;

	if (ep->plen) {

		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		    conn_param->private_data, ep->plen);
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0) {

		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
		ep->ord = 1;
	}

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	if (!ep->com.qp) {

		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
		err = -EINVAL;
		goto fail2;
	}
	ep->com.thread = curthread;
	ep->com.so = cm_id->so;

	init_sock(&ep->com);

	/* find a route */
	rt = find_route(
	    cm_id->local_addr.sin_addr.s_addr,
	    cm_id->remote_addr.sin_addr.s_addr,
	    cm_id->local_addr.sin_port,
	    cm_id->remote_addr.sin_port, 0);

	if (!rt) {

		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail2;
	}

	if (!(rt->rt_ifp->if_capenable & IFCAP_TOE)) {

		CTR2(KTR_IW_CXGBE, "%s:cc8 %p", __func__, ep);
		printf("%s - interface not TOE capable.\n", __func__);
		err = -ENOPROTOOPT;
		goto fail3;
	}
	tdev = TOEDEV(rt->rt_ifp);
	if (tdev == NULL) {

		CTR2(KTR_IW_CXGBE, "%s:cc9 %p", __func__, ep);
		printf("%s - No toedev for interface.\n", __func__);
		err = -ENOPROTOOPT;
		goto fail3;
	}
	RTFREE(rt);
	rt = NULL;

	state_set(&ep->com, CONNECTING);
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;
	err = soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
	    ep->com.thread);
	if (!err) {

		CTR2(KTR_IW_CXGBE, "%s:cca %p", __func__, ep);
		goto out;
	}

fail3:
	CTR2(KTR_IW_CXGBE, "%s:ccb %p", __func__, ep);
	if (rt != NULL)
		RTFREE(rt);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	CTR2(KTR_IW_CXGBE, "%s:ccE %p", __func__, ep);
	return (err);
}
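
/*
 * Active open in a nutshell: c4iw_connect() validates IRD/ORD, allocates
 * and wires up the endpoint, verifies that the route resolves to a
 * TOE-capable port, and fires a non-blocking soconnect().  Everything
 * after that is asynchronous: the socket upcall reports the result and
 * process_connected() starts the MPA exchange.
 */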
/*
 * iwcm->create_listen.  Returns -errno on failure.
 */
int
c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int rc;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;
	struct socket *so = cm_id->so;

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	CTR5(KTR_IW_CXGBE, "%s: cm_id %p, lso %p, ep %p, inp %p", __func__,
	    cm_id, so, ep, so->so_pcb);
	if (ep == NULL) {
		log(LOG_ERR, "%s: failed to alloc memory for endpoint\n",
		    __func__);
		rc = ENOMEM;
		goto failed;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.thread = curthread;
	state_set(&ep->com, LISTEN);

	init_sock(&ep->com);

	rc = solisten(so, ep->backlog, ep->com.thread);
	if (rc != 0) {
		log(LOG_ERR, "%s: failed to start listener: %d\n", __func__,
		    rc);
		close_socket(&ep->com, 0);
		cm_id->rem_ref(cm_id);
		c4iw_put_ep(&ep->com);
		goto failed;
	}

	cm_id->provider_data = ep;
	return (0);

failed:
	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, FAILED (%d)", __func__, cm_id, rc);
	return (-rc);
}
int
c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int rc;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	CTR4(KTR_IW_CXGBE, "%s: cm_id %p, so %p, inp %p", __func__, cm_id,
	    cm_id->so, cm_id->so->so_pcb);

	state_set(&ep->com, DEAD);
	rc = close_socket(&ep->com, 0);
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);

	return (rc);
}
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);

	rdev = &ep->com.dev->rdev;

	if (c4iw_fatal_error(rdev)) {

		CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
	    states[ep->com.state]);

	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			START_EP_TIMER(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:

		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {

			close = 1;
			if (abrupt) {
				STOP_EP_TIMER(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		CTR3(KTR_IW_CXGBE,
		    "%s ignoring disconnect ep %p state %u", __func__,
		    ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	mutex_unlock(&ep->com.mutex);

	if (close) {

		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);

		if (abrupt) {

			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
			set_bit(EP_DISC_ABORT, &ep->com.history);
			ret = abort_connection(ep);
		} else {

			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
			set_bit(EP_DISC_CLOSE, &ep->com.history);
			__state_set(&ep->com, MORIBUND);
			ret = shutdown_socket(&ep->com);
		}
	}

	if (fatal) {

		release_ep_resources(ep);
		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
	}
	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
	return ret;
}
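
/*
 * Two flavors of teardown, selected by 'abrupt': an abort (SO_LINGER=0
 * close so the peer sees an RST, via abort_connection()) versus a graceful
 * half-close (shutdown_socket() sends a FIN and the ep waits in
 * CLOSING/MORIBUND for the close-complete event).  A fatal adapter error
 * short-circuits both and releases the endpoint immediately.
 */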
#ifdef C4IW_EP_REDIRECT
int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
    struct l2t_entry *l2t)
{
	struct c4iw_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
	    l2t);
	dst_hold(new);
	cxgb4_l2t_release(ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}
#endif /* C4IW_EP_REDIRECT */
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	CTR2(KTR_IW_CXGBE, "%s:etB %p", __func__, ep);
	spin_lock(&timeout_lock);

	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {

		list_add_tail(&ep->entry, &timeout_list);
		kickit = 1;
	}
	spin_unlock(&timeout_lock);

	if (kickit) {

		CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
		queue_work(c4iw_taskq, &c4iw_task);
	}
	CTR2(KTR_IW_CXGBE, "%s:etE %p", __func__, ep);
}
static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
{
	uint64_t val = be64toh(*rpl);
	int ret;
	struct c4iw_wr_wait *wr_waitp;

	ret = (int)((val >> 8) & 0xff);
	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
	if (wr_waitp)
		c4iw_wake_up(wr_waitp, ret ? -ret : 0);

	return (0);
}
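
/*
 * FW6 work-request replies arrive as two big-endian doublewords: the
 * completion status lives in bits 15:8 of the first, and the second echoes
 * the c4iw_wr_wait cookie the driver stashed in the request, which is how
 * the sleeping submitter gets woken with the (negated) error code.
 */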
static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
{
	struct t4_cqe cqe = *(const struct t4_cqe *)(&rpl[0]);

	CTR2(KTR_IW_CXGBE, "%s rpl %p", __func__, rpl);
	c4iw_ev_dispatch(sc->iwarp_softc, &cqe);

	return (0);
}
static int terminate(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rdma_terminate *rpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(rpl);
	struct c4iw_qp_attributes attrs;
	struct toepcb *toep = lookup_tid(sc, tid);
	struct socket *so = inp_inpcbtosocket(toep->inp);
	struct c4iw_ep *ep = so->so_rcv.sb_upcallarg;

	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);

	if (ep && ep->com.qp) {

		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		    ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.dev, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);

	m_freem(m);
	return (0);
}
void
c4iw_cm_init_cpl(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, terminate);
	t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, fw6_wr_rpl);
	t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, fw6_cqe_handler);
	t4_register_an_handler(sc, c4iw_ev_handler);
}
void
c4iw_cm_term_cpl(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, NULL);
	t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, NULL);
	t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, NULL);
}
int __init c4iw_cm_init(void)
{

	TAILQ_INIT(&req_list);
	spin_lock_init(&req_lock);
	INIT_LIST_HEAD(&timeout_list);
	spin_lock_init(&timeout_lock);

	INIT_WORK(&c4iw_task, process_req);

	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
	if (c4iw_taskq == NULL)
		return -ENOMEM;

	return 0;
}
void __exit c4iw_cm_term(void)
{

	WARN_ON(!TAILQ_EMPTY(&req_list));
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(c4iw_taskq);
	destroy_workqueue(c4iw_taskq);
}