/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct cpl_set_tcb_rpl;
#include <linux/types.h>

#include "tom/t4_tom.h"

#define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <rdma/rdma_cm.h>
static spinlock_t req_lock;
static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
static struct work_struct c4iw_task;
static struct workqueue_struct *c4iw_taskq;
static LIST_HEAD(err_cqe_list);
static spinlock_t err_cqe_lock;
static LIST_HEAD(listen_port_list);
static DEFINE_MUTEX(listen_port_mutex);
static void process_req(struct work_struct *ctx);
static void start_ep_timer(struct c4iw_ep *ep);
static int stop_ep_timer(struct c4iw_ep *ep);
static int set_tcpinfo(struct c4iw_ep *ep);
static void process_timeout(struct c4iw_ep *ep);
static void process_err_cqes(void);
static void *alloc_ep(int size, gfp_t flags);
static void close_socket(struct socket *so);
static int send_mpa_req(struct c4iw_ep *ep);
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
static void close_complete_upcall(struct c4iw_ep *ep, int status);
static int send_abort(struct c4iw_ep *ep);
static void peer_close_upcall(struct c4iw_ep *ep);
static void peer_abort_upcall(struct c4iw_ep *ep);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int connect_request_upcall(struct c4iw_ep *ep);
static void established_upcall(struct c4iw_ep *ep);
static int process_mpa_reply(struct c4iw_ep *ep);
static int process_mpa_request(struct c4iw_ep *ep);
static void process_peer_close(struct c4iw_ep *ep);
static void process_conn_error(struct c4iw_ep *ep);
static void process_close_complete(struct c4iw_ep *ep);
static void ep_timeout(unsigned long arg);
static void setiwsockopt(struct socket *so);
static void init_iwarp_socket(struct socket *so, void *arg);
static void uninit_iwarp_socket(struct socket *so);
static void process_data(struct c4iw_ep *ep);
static void process_connected(struct c4iw_ep *ep);
static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
static void process_socket_event(struct c4iw_ep *ep);
static void release_ep_resources(struct c4iw_ep *ep);
static int process_terminate(struct c4iw_ep *ep);
static int terminate(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m);
static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep *lep);
static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep);
static struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so);
static int get_ifnet_from_raddr(struct sockaddr_storage *raddr,
    struct ifnet **ifp);
static void process_newconn(struct c4iw_listen_ep *master_lep,
    struct socket *new_so);
#define START_EP_TIMER(ep) \
    do { \
	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    start_ep_timer(ep); \
    } while (0)

#define STOP_EP_TIMER(ep) \
    ({ \
	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    stop_ep_timer(ep); \
    })

#define GET_LOCAL_ADDR(pladdr, so) \
    do { \
	    struct sockaddr_storage *__a = NULL; \
	    struct inpcb *__inp = sotoinpcb(so); \
	    KASSERT(__inp != NULL, \
		("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
	    if (__inp->inp_vflag & INP_IPV4) \
		    in_getsockaddr(so, (struct sockaddr **)&__a); \
	    else \
		    in6_getsockaddr(so, (struct sockaddr **)&__a); \
	    *(pladdr) = *__a; \
	    free(__a, M_SONAME); \
    } while (0)

#define GET_REMOTE_ADDR(praddr, so) \
    do { \
	    struct sockaddr_storage *__a = NULL; \
	    struct inpcb *__inp = sotoinpcb(so); \
	    KASSERT(__inp != NULL, \
		("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
	    if (__inp->inp_vflag & INP_IPV4) \
		    in_getpeeraddr(so, (struct sockaddr **)&__a); \
	    else \
		    in6_getpeeraddr(so, (struct sockaddr **)&__a); \
	    *(praddr) = *__a; \
	    free(__a, M_SONAME); \
    } while (0)
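/*
 * Typical usage of the two macros above (this is how process_newconn() below
 * uses them): copy out the local and remote addresses of a freshly accepted
 * socket into the endpoint, regardless of address family:
 *
 *	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
 *	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
 */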
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_req_wait",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}
/* allocated per TCP port while listening */
struct listen_port_info {
	uint16_t port_num;		/* TCP port address */
	struct list_head list;		/* belongs to listen_port_list */
	struct list_head lep_list;	/* per port lep list */
	uint32_t refcnt;		/* number of lep's listening */
};
/*
 * The following two lists are used to manage INADDR_ANY listeners:
 * 1) listen_port_list
 * 2) lep_list (i.e., struct listen_port_info->lep_list)
 *
 * Below is the INADDR_ANY listener lists overview on a system with a two port
 * adapter:
 *
 *   |------------------|
 *   |listen_port_list  |
 *   |------------------|
 *            |
 *            |              |-----------|       |-----------|
 *            |              | port_num:X|       | port_num:X|
 *            |--------------|-list------|-------|-list------|-------....
 *                           | lep_list----|     | lep_list----|
 *                           | refcnt    | |     | refcnt    | |
 *                           |           | |     |           | |
 *                           |-----------| |     |-----------| |
 *                                         |                   |
 *                                         |                   |      lep1              lep2
 *                                         |                   | |----------------|  |----------------|
 *                                         |                   |-| listen_ep_list |--| listen_ep_list |
 *                                         |                     |----------------|  |----------------|
 *                                         |
 *                                         |      lep1              lep2
 *                                         | |----------------|  |----------------|
 *                                         |-| listen_ep_list |--| listen_ep_list |
 *                                           |----------------|  |----------------|
 *
 * Because this is a two port adapter, there are two leps (lep1 & lep2) for
 * each TCP port number.
 *
 * Here 'lep1' is always marked as the master lep, because solisten() is
 * always called through the first lep.
 */
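/*
 * Illustrative sketch (hypothetical helper, not part of the driver): walking
 * the two lists described above to count the leps listening on a TCP port.
 *
 *	static int
 *	count_leps_on_port(uint16_t port)
 *	{
 *		struct listen_port_info *pi;
 *		struct c4iw_listen_ep *lep;
 *		int n = 0;
 *
 *		mutex_lock(&listen_port_mutex);
 *		list_for_each_entry(pi, &listen_port_list, list)
 *			if (pi->port_num == port)
 *				list_for_each_entry(lep, &pi->lep_list,
 *				    listen_ep_list)
 *					n++;
 *		mutex_unlock(&listen_port_mutex);
 *		return (n);
 *	}
 */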
static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep *lep)
{
	uint16_t port;
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;

	port = (laddr->ss_family == AF_INET) ?
		((struct sockaddr_in *)laddr)->sin_port :
		((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port)
			goto found_port;

	port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
	port_info->port_num = port;
	port_info->refcnt = 0;

	list_add_tail(&port_info->list, &listen_port_list);
	INIT_LIST_HEAD(&port_info->lep_list);

found_port:
	port_info->refcnt++;
	list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
	mutex_unlock(&listen_port_mutex);
	return port_info;
}
static int
rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
{
	uint16_t port;
	int refcnt = 0;
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;

	port = (laddr->ss_family == AF_INET) ?
		((struct sockaddr_in *)laddr)->sin_port :
		((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	/* get the port_info structure based on the lep's port address */
	list_for_each_entry(port_info, &listen_port_list, list) {
		if (port_info->port_num == port) {
			port_info->refcnt--;
			refcnt = port_info->refcnt;
			/* remove the current lep from the listen list */
			list_del(&lep->listen_ep_list);
			if (port_info->refcnt == 0) {
				/* Remove this entry from the list as there
				 * are no more listeners for this port_num.
				 */
				list_del(&port_info->list);
				free(port_info, M_CXGBE);
			}
			break;
		}
	}
	mutex_unlock(&listen_port_mutex);
	return refcnt;
}
/*
 * Find the lep that belongs to the ifnet on which the SYN frame was received.
 */
static struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
{
	struct adapter *adap = NULL;
	struct c4iw_listen_ep *lep = NULL;
	struct ifnet *ifp = NULL, *hw_ifp = NULL;
	struct listen_port_info *port_info = NULL;
	int i = 0, found_portinfo = 0, found_lep = 0;
	uint16_t port;

	/*
	 * STEP 1: Figure out 'ifp' of the physical interface, not pseudo
	 * interfaces like vlan, lagg, etc..
	 * TBD: lagg support, lagg + vlan support.
	 */
	ifp = TOEPCB(so)->l2te->ifp;
	if (ifp->if_type == IFT_L2VLAN) {
		hw_ifp = VLAN_TRUNKDEV(ifp);
		if (hw_ifp == NULL) {
			CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of "
				"vlan ifnet %p, sock %p, master_lep %p",
				__func__, ifp, so, master_lep);
			return (NULL);
		}
	} else
		hw_ifp = ifp;

	/* STEP 2: Find 'port_info' with listener local port address. */
	port = (master_lep->com.local_addr.ss_family == AF_INET) ?
		((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
		((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;

	mutex_lock(&listen_port_mutex);
	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port) {
			found_portinfo = 1;
			break;
		}
	if (!found_portinfo)
		goto out;

	/* STEP 3: Traverse through list of lep's that are bound to the current
	 * TCP port address and find the lep that belongs to the ifnet on which
	 * the SYN frame was received.
	 */
	list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
		adap = lep->com.dev->rdev.adap;
		for_each_port(adap, i) {
			if (hw_ifp == adap->port[i]->vi[0].ifp) {
				found_lep = 1;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&listen_port_mutex);
	return found_lep ? lep : (NULL);
}
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};

	CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
			ep, ep->hwtid, ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		break;
	default:
		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u"
				, __func__, ep, ep->hwtid, ep->com.state);
		break;
	}
	c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return;
}
struct cqe_list_entry {
	struct list_head entry;
	struct c4iw_dev *rhp;
	struct t4_cqe err_cqe;
};

static void
process_err_cqes(void)
{
	unsigned long flag;
	struct cqe_list_entry *cle;

	spin_lock_irqsave(&err_cqe_lock, flag);
	while (!list_empty(&err_cqe_list)) {
		struct list_head *tmp;
		tmp = err_cqe_list.next;
		list_del(tmp);
		tmp->next = tmp->prev = NULL;
		spin_unlock_irqrestore(&err_cqe_lock, flag);
		cle = list_entry(tmp, struct cqe_list_entry, entry);
		c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
		free(cle, M_CXGBE);
		spin_lock_irqsave(&err_cqe_lock, flag);
	}
	spin_unlock_irqrestore(&err_cqe_lock, flag);

	return;
}
static void
process_req(struct work_struct *ctx)
{
	struct c4iw_ep_common *epc;
	unsigned long flag;
	int ep_events;

	spin_lock_irqsave(&req_lock, flag);
	while (!TAILQ_EMPTY(&req_list)) {
		epc = TAILQ_FIRST(&req_list);
		TAILQ_REMOVE(&req_list, epc, entry);
		epc->entry.tqe_prev = NULL;
		ep_events = epc->ep_events;
		epc->ep_events = 0;
		spin_unlock_irqrestore(&req_lock, flag);
		mutex_lock(&epc->mutex);
		CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x",
		    __func__, epc->so, epc, states[epc->state], ep_events);
		if (ep_events & C4IW_EVENT_TERM)
			process_terminate((struct c4iw_ep *)epc);
		if (ep_events & C4IW_EVENT_TIMEOUT)
			process_timeout((struct c4iw_ep *)epc);
		if (ep_events & C4IW_EVENT_SOCKET)
			process_socket_event((struct c4iw_ep *)epc);
		mutex_unlock(&epc->mutex);
		c4iw_put_ep(&epc->com);
		process_err_cqes();
		spin_lock_irqsave(&req_lock, flag);
	}
	spin_unlock_irqrestore(&req_lock, flag);
}
/*
 * XXX: doesn't belong here in the iWARP driver.
 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
 *      set.  Is this a valid assumption for active open?
 */
static int
set_tcpinfo(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct toepcb *toep;
	int rc = 0;

	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if ((tp->t_flags & TF_TOE) == 0) {
		rc = EINVAL;
		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
		    __func__, so, ep);
		goto done;
	}
	toep = TOEPCB(so);

	ep->hwtid = toep->tid;
	ep->snd_seq = tp->snd_nxt;
	ep->rcv_seq = tp->rcv_nxt;
done:
	INP_WUNLOCK(inp);
	return (rc);
}
static int
get_ifnet_from_raddr(struct sockaddr_storage *raddr, struct ifnet **ifp)
{
	int err = 0;

	if (raddr->ss_family == AF_INET) {
		struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr;
		struct nhop4_extended nh4 = {0};

		err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, raddr4->sin_addr,
				NHR_REF, 0, &nh4);
		*ifp = nh4.nh_ifp;
		fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);
	} else {
		struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr;
		struct nhop6_extended nh6 = {0};
		struct in6_addr addr6;
		uint32_t scopeid;

		memset(&addr6, 0, sizeof(addr6));
		in6_splitscope((struct in6_addr *)&raddr6->sin6_addr,
				&addr6, &scopeid);
		err = fib6_lookup_nh_ext(RT_DEFAULT_FIB, &addr6, scopeid,
				NHR_REF, 0, &nh6);
		*ifp = nh6.nh_ifp;
		fib6_free_nh_ext(RT_DEFAULT_FIB, &nh6);
	}

	CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err);
	return err;
}
static void
close_socket(struct socket *so)
{
	uninit_iwarp_socket(so);
	soclose(so);
}
static void
process_peer_close(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int disconnect = 1;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	switch (ep->com.state) {

		case MPA_REQ_WAIT:
			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD",
			    __func__, ep);
			/* FALLTHROUGH */
		case MPA_REQ_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
			    __func__, ep);
			ep->com.state = DEAD;
			connect_reply_upcall(ep, -ECONNABORTED);

			disconnect = 0;
			STOP_EP_TIMER(ep);
			close_socket(ep->com.so);
			deref_cm_id(&ep->com);
			release = 1;
			break;

		case MPA_REQ_RCVD:

			/*
			 * We're gonna mark this puppy DEAD, but keep
			 * the reference on it until the ULP accepts or
			 * rejects the CR.
			 */
			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
			    __func__, ep);
			ep->com.state = CLOSING;
			break;

		case MPA_REP_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
			    __func__, ep);
			ep->com.state = CLOSING;
			break;

		case FPDU_MODE:
			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
			    __func__, ep);
			START_EP_TIMER(ep);
			ep->com.state = CLOSING;
			attrs.next_state = C4IW_QP_STATE_CLOSING;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			peer_close_upcall(ep);
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
			    __func__, ep);
			ep->com.state = MORIBUND;
			disconnect = 0;
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);
			if (ep->com.cm_id && ep->com.qp) {
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
						C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			}
			close_socket(ep->com.so);
			close_complete_upcall(ep, 0);
			ep->com.state = DEAD;
			release = 1;
			disconnect = 0;
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		default:
			panic("%s: ep %p state %d", __func__, ep,
					ep->com.state);
			break;
	}

	if (disconnect) {
		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
	}
	if (release) {
		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
	return;
}
static void
process_conn_error(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int ret;
	int state;

	state = ep->com.state;
	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
	    __func__, ep, ep->com.so, ep->com.so->so_error,
	    states[ep->com.state]);

	switch (state) {

		case MPA_REQ_WAIT:
			STOP_EP_TIMER(ep);
			c4iw_put_ep(&ep->parent_ep->com);
			break;

		case MPA_REQ_SENT:
			STOP_EP_TIMER(ep);
			connect_reply_upcall(ep, -ECONNRESET);
			break;

		case MPA_REP_SENT:
			ep->com.rpl_err = ECONNRESET;
			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
			break;

		case MPA_REQ_RCVD:
			break;

		case MORIBUND:
		case CLOSING:
			STOP_EP_TIMER(ep);
			/* FALLTHROUGH */
		case FPDU_MODE:

			if (ep->com.cm_id && ep->com.qp) {

				attrs.next_state = C4IW_QP_STATE_ERROR;
				ret = c4iw_modify_qp(ep->com.qp->rhp,
					ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
					&attrs, 1);
				if (ret)
					log(LOG_ERR,
							"%s - qp <- error failed!\n",
							__func__);
			}
			peer_abort_upcall(ep);
			break;

		case ABORTING:
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
			    __func__, ep->com.so->so_error);
			return;

		default:
			panic("%s: ep %p state %d", __func__, ep, state);
			break;
	}

	if (state != ABORTING) {
		close_socket(ep->com.so);
		ep->com.state = DEAD;
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
	return;
}
static void
process_close_complete(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	/* The cm_id may be null if we failed to connect */
	set_bit(CLOSE_CON_RPL, &ep->com.history);

	switch (ep->com.state) {

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
			    __func__, ep);
			ep->com.state = MORIBUND;
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);

			if ((ep->com.cm_id) && (ep->com.qp)) {

				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
				    __func__, ep);
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.dev,
						ep->com.qp,
						C4IW_QP_ATTR_NEXT_STATE,
						&attrs, 1);
			}

			close_socket(ep->com.so);
			close_complete_upcall(ep, 0);
			ep->com.state = DEAD;
			release = 1;
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
			break;
		default:
			CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
			    __func__, ep);
			panic("%s:pcc7 %p unknown ep state", __func__, ep);
			break;
	}

	if (release) {
		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
		release_ep_resources(ep);
	}
	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
	return;
}
static void
setiwsockopt(struct socket *so)
{
	int rc;
	struct sockopt sopt;
	int on = 1;

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;
	sopt.sopt_td = NULL;
	rc = -sosetopt(so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
		    __func__, so, -rc);
	}
}
static void
init_iwarp_socket(struct socket *so, void *arg)
{
	if (SOLISTENING(so)) {
		SOLISTEN_LOCK(so);
		solisten_upcall_set(so, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;
		SOLISTEN_UNLOCK(so);
	} else {
		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
}

static void
uninit_iwarp_socket(struct socket *so)
{
	if (SOLISTENING(so)) {
		SOLISTEN_LOCK(so);
		solisten_upcall_set(so, NULL, NULL);
		SOLISTEN_UNLOCK(so);
	} else {
		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_clear(so, SO_RCV);
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
}
static void
process_data(struct c4iw_ep *ep)
{
	int ret = 0;
	int disconnect = 0;
	struct c4iw_qp_attributes attrs = {0};

	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		disconnect = process_mpa_reply(ep);
		break;
	case MPA_REQ_WAIT:
		disconnect = process_mpa_request(ep);
		if (disconnect)
			/* Referred to in process_newconn() */
			c4iw_put_ep(&ep->parent_ep->com);
		break;
	case FPDU_MODE:
		MPASS(ep->com.qp != NULL);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
				C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -EINPROGRESS)
			disconnect = 1;
		break;
	default:
		log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
		    "state %d, so %p, so_state 0x%x, sbused %u\n",
		    __func__, ep, ep->com.state, ep->com.so,
		    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
		break;
	}
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
	return;
}
static void
process_connected(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;

	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
		if (send_mpa_req(ep))
			goto err;
	} else {
		connect_reply_upcall(ep, -so->so_error);
		goto err;
	}
	return;
err:
	close_socket(so);
	ep->com.state = DEAD;
	c4iw_put_ep(&ep->com);
	return;
}
static inline int c4iw_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return (IN_ZERONET(
			ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr)));
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
				ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int c4iw_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return (IN_LOOPBACK(
			ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr)));
	else
		return IN6_IS_ADDR_LOOPBACK(
				&((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int c4iw_any_addr(struct sockaddr *addr)
{
	return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr);
}
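/*
 * A quick illustration (hypothetical values): a ULP listening on
 * 0.0.0.0:9999 makes c4iw_zero_addr() nonzero, and one listening on
 * 127.0.0.1:9999 makes c4iw_loopback_addr() nonzero; either way
 * c4iw_any_addr() is true, so process_newconn() below must resolve the
 * master lep to the real per-ifnet lep via find_real_listen_ep().
 */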
static void
process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
{
	struct c4iw_listen_ep *real_lep = NULL;
	struct c4iw_ep *new_ep = NULL;
	struct sockaddr_in *remote = NULL;
	int ret = 0;

	MPASS(new_so != NULL);

	if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr)) {
		/* Here we need to find the 'real_lep' that belongs to the
		 * incoming socket's network interface, such that the newly
		 * created 'ep' can be attached to the real 'lep'.
		 */
		real_lep = find_real_listen_ep(master_lep, new_so);
		if (real_lep == NULL) {
			CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
					"ep for sock: %p", __func__, new_so);
			log(LOG_ERR,"%s: Could not find the real listen ep for "
					"sock: %p\n", __func__, new_so);
			/* FIXME: properly free the 'new_so' in failure case.
			 * Use of soabort() and soclose() is not legal here
			 * (before soaccept()).
			 */
			return;
		}
	} else /* for Non-Wildcard address, master_lep is always the real_lep */
		real_lep = master_lep;

	new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);

	CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
	    "listening so %p, new so %p", __func__, master_lep, real_lep,
	    new_ep, master_lep->com.so, new_so);

	new_ep->com.dev = real_lep->com.dev;
	new_ep->com.so = new_so;
	new_ep->com.cm_id = NULL;
	new_ep->com.thread = real_lep->com.thread;
	new_ep->parent_ep = real_lep;

	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
	c4iw_get_ep(&real_lep->com);
	init_timer(&new_ep->timer);
	new_ep->com.state = MPA_REQ_WAIT;
	START_EP_TIMER(new_ep);

	setiwsockopt(new_so);
	ret = soaccept(new_so, (struct sockaddr **)&remote);
	if (ret != 0) {
		CTR4(KTR_IW_CXGBE,
				"%s:listen sock:%p, new sock:%p, ret:%d",
				__func__, master_lep->com.so, new_so, ret);
		free(remote, M_SONAME);
		uninit_iwarp_socket(new_so);
		soclose(new_so);
		c4iw_put_ep(&new_ep->com);
		c4iw_put_ep(&real_lep->com);
		return;
	}
	free(remote, M_SONAME);

	/* MPA request might have been queued up on the socket already, so we
	 * initialize the socket/upcall_handler under lock to prevent processing
	 * MPA request on another thread (via process_req()) simultaneously.
	 */
	c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below, this is to
				      avoid freeing of ep before ep unlock. */
	mutex_lock(&new_ep->com.mutex);
	init_iwarp_socket(new_so, &new_ep->com);

	ret = process_mpa_request(new_ep);
	if (ret) {
		c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
		c4iw_put_ep(&real_lep->com);
	}
	mutex_unlock(&new_ep->com.mutex);
	c4iw_put_ep(&new_ep->com);

	return;
}
static int
add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
{
	int ret = 0;
	unsigned long flag;

	spin_lock_irqsave(&req_lock, flag);
	if (ep && ep->com.so) {
		ep->com.ep_events |= new_ep_event;
		if (!ep->com.entry.tqe_prev) {
			c4iw_get_ep(&ep->com);
			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
			queue_work(c4iw_taskq, &c4iw_task);
		}
	}
	spin_unlock_irqrestore(&req_lock, flag);

	return (ret);
}
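/*
 * Deferral sketch of the event path around this function (all names are from
 * this file): socket upcalls can run with socket locks held, so they only
 * queue the ep here and the real work happens later on the taskqueue:
 *
 *	c4iw_so_upcall() / terminate()
 *	    -> add_ep_to_req_list(ep, C4IW_EVENT_SOCKET / C4IW_EVENT_TERM)
 *	        -> queue_work(c4iw_taskq, &c4iw_task)
 *	            -> process_req()
 *	                -> process_socket_event() / process_terminate()
 */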
static int
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
	struct c4iw_ep *ep = arg;

	CTR6(KTR_IW_CXGBE,
	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
	    __func__, so, so->so_state, ep, states[ep->com.state],
	    ep->com.entry.tqe_prev);

	MPASS(ep->com.so == so);
	/*
	 * Wake up any threads waiting in rdma_init()/rdma_fini(),
	 * with locks held.
	 */
	if (so->so_error)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);

	return (SU_OK);
}
static int
terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct socket *so;
	struct c4iw_ep *ep;

	INP_WLOCK(toep->inp);
	so = inp_inpcbtosocket(toep->inp);
	ep = so->so_rcv.sb_upcallarg;
	INP_WUNLOCK(toep->inp);

	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
	add_ep_to_req_list(ep, C4IW_EVENT_TERM);

	return 0;
}
static void
process_socket_event(struct c4iw_ep *ep)
{
	int state = ep->com.state;
	struct socket *so = ep->com.so;

	if (ep->com.state == DEAD) {
		CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
			"ep %p ep_state %s", __func__, ep, states[state]);
		return;
	}

	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
	    so->so_error, so->so_rcv.sb_state, ep, states[state]);

	if (state == CONNECTING) {
		process_connected(ep);
		return;
	}

	if (state == LISTEN) {
		struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
		struct socket *listen_so = so, *new_so = NULL;
		int error = 0;

		SOLISTEN_LOCK(listen_so);
		do {
			error = solisten_dequeue(listen_so, &new_so,
					SOCK_NONBLOCK);
			if (error) {
				CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
					"error %d", __func__, lep, listen_so,
					error);
				return;
			}
			process_newconn(lep, new_so);

			/* solisten_dequeue() unlocks on return, so acquire
			 * the lock again for sol_qlen and also for the next
			 * iteration.
			 */
			SOLISTEN_LOCK(listen_so);
		} while (listen_so->sol_qlen);
		SOLISTEN_UNLOCK(listen_so);
		return;
	}

	/* connection error */
	if (so->so_error) {
		process_conn_error(ep);
		return;
	}

	/* peer close */
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
		process_peer_close(ep);
		/*
		 * check whether socket disconnect event is pending before
		 * returning. Fallthrough if yes.
		 */
		if (!(so->so_state & SS_ISDISCONNECTED))
			return;
	}

	/* close complete */
	if (so->so_state & SS_ISDISCONNECTED) {
		process_close_complete(ep);
		return;
	}

	/* rx data */
	if (sbused(&ep->com.so->so_rcv)) {
		process_data(ep);
		return;
	}
	/* Socket events for 'MPA Request Received' and 'Close Complete'
	 * were already processed earlier in their previous event handlers.
	 * Hence, these socket events are skipped.
	 * And any other socket events must have been handled above.
	 */
	MPASS((ep->com.state == MPA_REQ_RCVD) || (ep->com.state == MORIBUND));

	if ((ep->com.state != MPA_REQ_RCVD) && (ep->com.state != MORIBUND))
		log(LOG_ERR, "%s: Unprocessed socket event so %p, "
		    "so_state 0x%x, so_err %d, sb_state 0x%x, ep %p, ep_state %s\n",
		    __func__, so, so->so_state, so->so_error, so->so_rcv.sb_state,
		    ep, states[state]);
}
SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "iw_cxgbe driver parameters");

static int dack_mode = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
    "Delayed ack mode (default = 0)");

int c4iw_max_read_depth = 8;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
    "Per-connection max ORD/IRD (default = 8)");

static int enable_tcp_timestamps;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
    "Enable tcp timestamps (default = 0)");

static int enable_tcp_sack;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
    "Enable tcp SACK (default = 0)");

static int enable_tcp_window_scaling = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
    "Enable tcp window scaling (default = 1)");

static int c4iw_debug = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
    "Enable debug logging (default = 0)");

static int peer2peer = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
    "Support peer2peer ULPs (default = 1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
    "RDMAP opcode to use for the RTR message: 1 = RDMA_READ, 0 = RDMA_WRITE (default = 1)");

static int ep_timeout_secs = 60;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
    "CM Endpoint operation timeout in seconds (default = 60)");

static int mpa_rev = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
    "MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");

static int markers_enabled;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
    "Enable MPA MARKERS (default = 0, disabled)");

static int crc_enabled = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
    "Enable MPA CRC (default = 1, enabled)");

static int rcv_win = 256 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
    "TCP receive window in bytes (default = 256KB)");

static int snd_win = 128 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
    "TCP send window in bytes (default = 128KB)");

int use_dsgl = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, use_dsgl, CTLFLAG_RWTUN, &use_dsgl, 0,
    "Use DSGL for PBL/FastReg (default = 1)");

int inline_threshold = 128;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, inline_threshold, CTLFLAG_RWTUN, &inline_threshold, 0,
    "inline vs dsgl threshold (default = 128)");

static int reuseaddr = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, reuseaddr, CTLFLAG_RWTUN, &reuseaddr, 0,
    "Enable SO_REUSEADDR & SO_REUSEPORT socket options on all iWARP client connections (default = 0)");
static void
start_ep_timer(struct c4iw_ep *ep)
{
	if (timer_pending(&ep->timer)) {
		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
		    ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int
stop_ep_timer(struct c4iw_ep *ep)
{
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}
static void *
alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc == NULL)
		return (NULL);

	kref_init(&epc->kref);
	mutex_init(&epc->mutex);
	c4iw_init_wr_wait(&epc->wr_wait);

	return (epc);
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;
	struct c4iw_ep_common *epc;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	epc = &ep->com;
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
	    __func__, epc));
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx",
	    __func__, ep, epc->history, epc->flags);
	kfree(epc);
}
static void release_ep_resources(struct c4iw_ep *ep)
{
	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
	return;
}
static int
send_mpa_req(struct c4iw_ep *ep)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	char mpa_rev_to_use = mpa_rev;
	int err = 0;

	if (ep->retry_with_mpa_v1)
		mpa_rev_to_use = 1;
	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL) {
		err = -ENOMEM;
		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",
				__func__, ep, err);
		goto err;
	}

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0) |
	    (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;

	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
			    sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
			}
		}
		memcpy(mpa->private_data, &mpa_v2_params,
			sizeof(struct mpa_v2_conn_params));

		if (ep->plen) {
			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params),
				ep->mpa_pkt + sizeof(*mpa), ep->plen);
		}
	} else {
		if (ep->plen)
			memcpy(mpa->private_data,
					ep->mpa_pkt + sizeof(*mpa), ep->plen);
		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
	}

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		err = -ENOMEM;
		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",
				__func__, ep, err);
		free(mpa, M_CXGBE);
		goto err;
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
			ep->com.thread);
	if (err) {
		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",
				__func__, ep, err);
		goto err;
	}

	START_EP_TIMER(ep);
	ep->com.state = MPA_REQ_SENT;
	ep->mpa_attr.initiator = 1;
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
	return 0;
err:
	connect_reply_upcall(ep, err);
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
	return err;
}
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	int err;

	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
	    ep->plen);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpalen += sizeof(struct mpa_v2_conn_params);
		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
		    ep->mpa_attr.version, mpalen);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
			    sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
			(peer2peer ? MPA_V2_PEER2PEER_MODEL :
			 0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					(p2p_type ==
					 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					 FW_RI_INIT_P2PTYPE_READ_REQ ?
					 MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
				sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params), pdata, plen);
		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
	} else if (plen)
		memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
	if (!err)
		ep->snd_seq += mpalen;
	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
	return err;
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mbuf *m;
	struct mpa_v2_conn_params mpa_v2_params;
	int err;

	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
		    ep->mpa_attr.version);
		mpalen += sizeof(struct mpa_v2_conn_params);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
			htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);

		if (peer2peer && (ep->mpa_attr.p2p_type !=
			FW_RI_INIT_P2PTYPE_DISABLED)) {

			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {

				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {

				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
		}

		memcpy(mpa->private_data, &mpa_v2_params,
			sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else if (plen)
		memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	ep->com.state = MPA_REP_SENT;
	ep->snd_seq += mpalen;
	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
			ep->com.thread);
	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
	return err;
}
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
	return;
}
static int
send_abort(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct sockopt sopt;
	int rc;
	struct linger l;

	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
	    states[ep->com.state], ep->hwtid);

	l.l_onoff = 1;
	l.l_linger = 0;

	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	sopt.sopt_td = NULL;
	rc = -sosetopt(so, &sopt);
	if (rc != 0) {
		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
		    __func__, so, rc);
	}

	uninit_iwarp_socket(so);
	soclose(so);
	set_bit(ABORT_CONN, &ep->com.history);

	/*
	 * TBD: iw_cxgbe driver should receive ABORT reply for every ABORT
	 * request it has sent. But the current TOE driver is not propagating
	 * this ABORT reply event (via do_abort_rpl) to iw_cxgbe. So as a
	 * workaround, de-reference 'ep' here instead of doing it in the
	 * abort_rpl() handler (not yet implemented) of the iw_cxgbe driver.
	 */
	release_ep_resources(ep);
	ep->com.state = DEAD;

	return (0);
}
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
	return;
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
	return;
}
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
					-ECONNRESET : status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {

		if (!ep->tried_with_mpa_v1) {

			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {

			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
			/* this means MPA_v1 is used */
			event.ord = c4iw_max_read_depth;
			event.ird = c4iw_max_read_depth;
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
		set_bit(CONN_RPL_UPCALL, &ep->com.history);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}

	if (status == -ECONNABORTED) {

		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
		return;
	}

	if (status < 0) {

		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
		deref_cm_id(&ep->com);
	}

	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
	return;
}
static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
	    ep->tried_with_mpa_v1);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;

	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {

		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}

	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
	    &event);
	if (ret) {
		CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
		    " IWCM, err:%d", __func__, ep, ret);
		c4iw_put_ep(&ep->com);
	} else
		/* Dereference parent_ep only in success case.
		 * In case of failure, parent_ep is dereferenced by the caller
		 * of process_mpa_request().
		 */
		c4iw_put_ep(&ep->parent_ep->com);

	set_bit(CONNREQ_UPCALL, &ep->com.history);
	return ret;
}
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
	return;
}
#define RELAXED_IRD_NEGOTIATION 1
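/*
 * Worked example (hypothetical numbers): if the peer's MPA v2 reply carries
 * ord = 16 while our ep->ird is 8, strict negotiation would flag
 * insufficient IRD and terminate the connection. With
 * RELAXED_IRD_NEGOTIATION, process_mpa_reply() below instead raises ep->ird
 * to 16, provided the adapter's max_ordird_qp limit allows it.
 */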
/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs = {0};
	enum c4iw_qp_attr_mask mask;
	int err;
	struct mbuf *top, *m;
	int flags = MSG_DONTWAIT;
	struct uio uio;
	int disconnect = 0;

	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);

	/*
	 * Stop mpa timer. If it expired, then
	 * we ignore the MPA reply. process_timeout()
	 * will abort the connection.
	 */
	if (STOP_EP_TIMER(ep))
		return 0;

	uio.uio_resid = 1000000;
	uio.uio_td = ep->com.thread;
	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);

	if (err) {

		if (err == EWOULDBLOCK) {

			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
			START_EP_TIMER(ep);
			return 0;
		}
		err = -err;
		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
		goto err;
	}

	if (ep->com.so->so_rcv.sb_mb) {

		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
		    __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
	}

	m = top;
	do {

		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
		/*
		 * If we get more than the supported amount of private data
		 * then we must fail this connection.
		 */
		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {

			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
			    ep->mpa_pkt_len + m->m_len);
			err = -EINVAL;
			goto err_stop_timer;
		}

		/*
		 * copy the new data into our accumulation buffer.
		 */
		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
		ep->mpa_pkt_len += m->m_len;
		if (!m->m_next)
			m = m->m_nextpkt;
		else
			m = m->m_next;
	} while (m);

	m_freem(top);
	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa)) {
		return 0;
	}
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {

		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
		    mpa->revision, mpa_rev);
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {

		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {

		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
		return 0;
	}

	if (mpa->flags & MPA_REJECT) {

		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	ep->com.state = FPDU_MODE;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {

		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;

		if (ep->mpa_attr.enhanced_rdma_conn) {

			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
					ep->com.dev->rdev.adap->params.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
				MPA_V2_PEER2PEER_MODEL) {

				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
				if (ntohs(mpa_v2_params->ord) &
					MPA_V2_RDMA_WRITE_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				}
				else if (ntohs(mpa_v2_params->ord) &
					MPA_V2_RDMA_READ_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
			}
		}
	} else {

		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);

		if (mpa->revision == 1) {

			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);

			if (peer2peer) {

				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
				ep->mpa_attr.p2p_type = p2p_type;
			}
		}
	}

	if (set_tcpinfo(ep)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
		printf("%s set_tcpinfo error\n", __func__);
		err = -ECONNRESET;
		goto err;
	}

	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	    ep->mpa_attr.p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state.
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
		(ep->mpa_attr.p2p_type != p2p_type)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
		C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
		C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);

	if (err) {

		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
		goto err;
	}

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {

		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {

		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
				__func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	STOP_EP_TIMER(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
	return disconnect;
}
/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int
process_mpa_request(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	int flags = MSG_DONTWAIT;
	int rc;
	struct iovec iov;
	struct uio uio;
	enum c4iw_ep_state state = ep->com.state;

	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);

	if (state != MPA_REQ_WAIT)
		return 0;

	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */

	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
	if (rc == EWOULDBLOCK)
		return 0;
	else if (rc)
		goto err_stop_timer;

	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
	    __func__, ep->com.so));
	ep->mpa_pkt_len += uio.uio_offset;

	/*
	 * If we get more than the supported amount of private data then we must
	 * fail this connection. XXX: check so_rcv->sb_cc, or peek with another
	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
	 * byte is filled by the soreceive above.
	 */

	/* Don't even have the MPA message. Wait for more data to arrive. */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto err_stop_timer;

	/*
	 * Fail if there's too much private data.
	 */
	plen = ntohs(mpa->private_data_size);
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto err_stop_timer;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto err_stop_timer;

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ird = min_t(u32, ep->ird,
					cur_max_read_depth(ep->com.dev));
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = min_t(u32, ep->ord,
					cur_max_read_depth(ep->com.dev));
			CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u",
				__func__, ep->ird, ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
		}
	} else if (mpa->revision == 1 && peer2peer)
		ep->mpa_attr.p2p_type = p2p_type;

	if (set_tcpinfo(ep))
		goto err_stop_timer;

	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	ep->com.state = MPA_REQ_RCVD;
	STOP_EP_TIMER(ep);

	/* drive upcall */
	if (ep->parent_ep->com.state != DEAD)
		if (connect_request_upcall(ep))
			goto err_out;
	return 0;

err_stop_timer:
	STOP_EP_TIMER(ep);
err_out:
	return 2;
}

/*
 * c4iw_reject_cr: reject a pending MPA connection request.  Sends an MPA
 * reject message carrying any private data and then tears the connection
 * down.
 */
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct c4iw_ep *ep = to_ep(cm_id);
	int abort = 0;
	mutex_lock(&ep->com.mutex);
	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);

	if ((ep->com.state == DEAD) ||
	    (ep->com.state != MPA_REQ_RCVD)) {
		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
		mutex_unlock(&ep->com.mutex);
		c4iw_put_ep(&ep->com);
		return (-ECONNRESET);
	}
	set_bit(ULP_REJECT, &ep->com.history);

	if (mpa_rev == 0) {
		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
		abort = 1;
	} else {
		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
		abort = send_mpa_reject(ep, pdata, pdata_len);
	}

	err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
	return (0);
}
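
/*
 * c4iw_accept_cr: accept a pending MPA connection request.  Negotiates
 * IRD/ORD against what the peer advertised, binds the QP to this endpoint,
 * moves the QP to RTS, and sends the MPA reply carrying any private data.
 */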
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct c4iw_qp_attributes attrs = {0};
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
	int abort = 0;

	mutex_lock(&ep->com.mutex);
	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);

	if ((ep->com.state == DEAD) ||
	    (ep->com.state != MPA_REQ_RCVD)) {
		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
		err = -ECONNRESET;
		goto err_out;
	}

	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
		err = -EINVAL;
		goto err_abort;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);

		if (conn_param->ord > ep->ird) {
			if (RELAXED_IRD_NEGOTIATION) {
				conn_param->ord = ep->ird;
			} else {
				ep->ird = conn_param->ird;
				ep->ord = conn_param->ord;
				send_mpa_reject(ep, conn_param->private_data,
				    conn_param->private_data_len);
				err = -ENOMEM;
				goto err_abort;
			}
		}
		if (conn_param->ird < ep->ord) {
			if (RELAXED_IRD_NEGOTIATION &&
			    ep->ord <= h->rdev.adap->params.max_ordird_qp) {
				conn_param->ird = ep->ord;
			} else {
				err = -ENOMEM;
				goto err_abort;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	if (ep->mpa_attr.version == 1) {
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;
	} else {
		if (peer2peer &&
		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
			ep->ird = 1;
	}

	CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d", __func__, __LINE__,
	    ep->ird, ep->ord);

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.qp = qp;
	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	    C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD |
	    C4IW_QP_ATTR_MAX_ORD;
	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
	if (err) {
		CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err);
		goto err_deref_cm_id;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
	    conn_param->private_data_len);
	if (err) {
		CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err);
		goto err_deref_cm_id;
	}
	ep->com.state = FPDU_MODE;
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
	return (0);
err_deref_cm_id:
	deref_cm_id(&ep->com);
err_abort:
	abort = 1;
err_out:
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
	return (err);
}
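
/*
 * c4iw_sock_create: create a TCP socket, set SO_REUSEADDR and SO_REUSEPORT,
 * and bind it to 'laddr'.  The address actually bound (including a
 * kernel-chosen port, if any) is passed back through 'laddr'.
 */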
static int
c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
{
	int ret;
	int size, on = 1;
	struct socket *sock = NULL;
	struct sockopt sopt;

	ret = sock_create_kern(laddr->ss_family,
	    SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret) {
		CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d",
		    __func__, ret);
		return (ret);
	}
	bzero(&sopt, sizeof(struct sockopt));
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_REUSEADDR;
	sopt.sopt_val = &on;
	sopt.sopt_valsize = sizeof(on);
	ret = -sosetopt(sock, &sopt);
	if (ret) {
		log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEADDR) "
		    "failed with %d.\n", __func__, sock, ret);
	}

	bzero(&sopt, sizeof(struct sockopt));
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_REUSEPORT;
	sopt.sopt_val = &on;
	sopt.sopt_valsize = sizeof(on);
	ret = -sosetopt(sock, &sopt);
	if (ret) {
		log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEPORT) "
		    "failed with %d.\n", __func__, sock, ret);
	}
	ret = -sobind(sock, (struct sockaddr *)laddr, curthread);
	if (ret) {
		CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %d",
		    __func__, ret);
		sock_release(sock);
		return (ret);
	}

	size = laddr->ss_family == AF_INET6 ?
	    sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in);
	ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0);
	if (ret) {
		CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %d",
		    __func__, ret);
		sock_release(sock);
		return (ret);
	}

	*so = sock;
	return (0);
}
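
/*
 * c4iw_connect: active open.  Resolves the egress interface for the peer,
 * checks that it is TOE-capable, creates and binds the socket, and starts
 * the TCP connect; the MPA exchange is driven later from socket upcalls.
 */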
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep = NULL;
	struct ifnet *nh_ifp;	/* Logical egress interface */
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;

	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	cm_id->provider_data = ep;

	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;

	if (ep->plen) {
		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		    conn_param->private_data, ep->plen);
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0) {
		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
		ep->ord = 1;
	}

	ep->com.cm_id = cm_id;
	ref_cm_id(&ep->com);
	ep->com.qp = get_qhp(dev, conn_param->qpn);

	if (!ep->com.qp) {
		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
		err = -EINVAL;
		goto fail;
	}
	ep->com.thread = curthread;
	CURVNET_SET(vnet);
	err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp);
	CURVNET_RESTORE();

	if (err) {
		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail;
	}

	if (!(nh_ifp->if_capenable & IFCAP_TOE) ||
	    TOEDEV(nh_ifp) == NULL) {
		err = -ENOPROTOOPT;
		goto fail;
	}
	ep->com.state = CONNECTING;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
	if (err)
		goto fail;

	setiwsockopt(ep->com.so);
	init_iwarp_socket(ep->com.so, &ep->com);
	err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
	    ep->com.thread);
	if (err)
		goto fail_free_so;
	CTR2(KTR_IW_CXGBE, "%s:ccE, ep %p", __func__, ep);
	return (0);
fail_free_so:
	uninit_iwarp_socket(ep->com.so);
	ep->com.state = DEAD;
	sock_release(ep->com.so);
	ep->com.so = NULL;
fail:
	deref_cm_id(&ep->com);
	c4iw_put_ep(&ep->com);
	ep = NULL;
out:
	CTR2(KTR_IW_CXGBE, "%s:ccE Error %d", __func__, err);
	return (err);
}

/*
 * iwcm->create_listen.  Returns -errno on failure.
 */
int
c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *lep = NULL;
	struct listen_port_info *port_info = NULL;
	int rc = 0;

	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id,
	    backlog);
	lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
	lep->com.cm_id = cm_id;
	ref_cm_id(&lep->com);
	lep->backlog = backlog;
	lep->com.local_addr = cm_id->local_addr;
	lep->com.thread = curthread;
	cm_id->provider_data = lep;
	lep->com.state = LISTEN;
	/* In case of INADDR_ANY, ibcore creates a cmid for each device and
	 * invokes iw_cxgbe listener callbacks assuming that iw_cxgbe creates
	 * HW listeners for each device separately.  But toecore expects a
	 * single solisten() call with the INADDR_ANY address to create HW
	 * listeners on all devices for a given port number.  So the iw_cxgbe
	 * driver calls solisten() only once for INADDR_ANY (usually during
	 * the first listener callback from ibcore).  All subsequent
	 * INADDR_ANY listener callbacks from ibcore (for the same port
	 * address) skip solisten(), as the first callback has already created
	 * listeners for all the other devices (via solisten).
	 */
	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
		port_info = add_ep_to_listenlist(lep);
		/* skip solisten() if refcnt > 1, as the listeners were
		 * already created by the 'master lep'
		 */
		if (port_info->refcnt > 1) {
			/* As there will be only one listener socket for a TCP
			 * port, copy the master lep's socket pointer to the
			 * other leps belonging to the same TCP port.
			 */
			struct c4iw_listen_ep *head_lep =
			    container_of(port_info->lep_list.next,
			    struct c4iw_listen_ep, listen_ep_list);
			lep->com.so = head_lep->com.so;
			goto out;
		}
	}
	rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so);
	if (rc) {
		CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d",
		    __func__, rc);
		goto fail;
	}

	rc = -solisten(lep->com.so, backlog, curthread);
	if (rc) {
		CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d",
		    __func__, lep->com.so, rc);
		goto fail_free_so;
	}
	init_iwarp_socket(lep->com.so, &lep->com);
out:
	return (0);

fail_free_so:
	sock_release(lep->com.so);
fail:
	if (port_info)
		rem_ep_from_listenlist(lep);
	deref_cm_id(&lep->com);
	c4iw_put_ep(&lep->com);
	return (rc);
}
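
/*
 * iwcm->destroy_listen.  For an INADDR_ANY listener the shared socket is
 * closed only when the last endpoint for that TCP port is removed from the
 * listen list.
 */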
int
c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	struct c4iw_listen_ep *lep = to_listen_ep(cm_id);

	mutex_lock(&lep->com.mutex);
	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id,
	    states[lep->com.state]);

	lep->com.state = DEAD;
	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
		/* if no refcount then close listen socket */
		if (!rem_ep_from_listenlist(lep))
			close_socket(lep->com.so);
	} else
		close_socket(lep->com.so);
	deref_cm_id(&lep->com);
	mutex_unlock(&lep->com.mutex);
	c4iw_put_ep(&lep->com);
	return (0);
}
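
/*
 * __c4iw_ep_disconnect: wrapper for callers that do not already hold the
 * endpoint mutex.
 */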
int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret;

	mutex_lock(&ep->com.mutex);
	ret = c4iw_ep_disconnect(ep, abrupt, gfp);
	mutex_unlock(&ep->com.mutex);
	return (ret);
}
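
/*
 * c4iw_ep_disconnect: move an endpoint towards CLOSING (graceful) or
 * ABORTING (abrupt) based on its current state.  Must be called with the
 * endpoint mutex held.  On a fatal error the endpoint's resources are
 * released and its state forced to DEAD.
 */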
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);

	rdev = &ep->com.dev->rdev;

	if (c4iw_fatal_error(rdev)) {
		CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
		fatal = 1;
		close_complete_upcall(ep, -ECONNRESET);
		ep->com.state = DEAD;
	}
	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
	    states[ep->com.state]);

	/*
	 * Ref the ep here in case we have fatal errors causing the
	 * ep to be released and freed.
	 */
	c4iw_get_ep(&ep->com);
	switch (ep->com.state) {

		case MPA_REQ_WAIT:
		case MPA_REQ_SENT:
		case MPA_REQ_RCVD:
		case MPA_REP_SENT:
		case FPDU_MODE:
			close = 1;
			if (abrupt)
				ep->com.state = ABORTING;
			else {
				ep->com.state = CLOSING;
				start_ep_timer(ep);
			}
			set_bit(CLOSE_SENT, &ep->com.flags);
			break;

		case CLOSING:

			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
				close = 1;
				if (abrupt)
					ep->com.state = ABORTING;
				else
					ep->com.state = MORIBUND;
			}
			break;

		case MORIBUND:
		case ABORTING:
		case DEAD:
			CTR3(KTR_IW_CXGBE,
			    "%s ignoring disconnect ep %p state %u", __func__,
			    ep, ep->com.state);
			break;

		default:
			BUG();
			break;
	}

	if (close) {
		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);

		if (abrupt) {
			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
			set_bit(EP_DISC_ABORT, &ep->com.history);
			close_complete_upcall(ep, -ECONNRESET);
			ret = send_abort(ep);
			if (ret)
				fatal = 1;
		} else {
			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
			set_bit(EP_DISC_CLOSE, &ep->com.history);

			if (!ep->parent_ep)
				ep->com.state = MORIBUND;

			CURVNET_SET(ep->com.so->so_vnet);
			sodisconnect(ep->com.so);
			CURVNET_RESTORE();
		}
	}

	if (fatal) {
		set_bit(EP_DISC_FAIL, &ep->com.history);
		if (!abrupt) {
			stop_ep_timer(ep);
			close_complete_upcall(ep, -EIO);
		}
		if (ep->com.qp) {
			struct c4iw_qp_attributes attrs = {0};

			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
			    C4IW_QP_ATTR_NEXT_STATE,
			    &attrs, 1);
			if (ret) {
				CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep);
				printf("%s - qp <- error failed!\n", __func__);
			}
		}
		release_ep_resources(ep);
		ep->com.state = DEAD;
		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
	}
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
	return (ret);
}

#ifdef C4IW_EP_REDIRECT
int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
    struct l2t_entry *l2t)
{
	struct c4iw_ep *ep = ctx;

	if (ep->dst != old)
		return (0);

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
	    l2t);
	dst_hold(new);
	cxgb4_l2t_release(ep->l2t);
	ep->l2t = l2t;
	dst_release(ep->dst);
	ep->dst = new;
	return (1);
}
#endif
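
/*
 * ep_timeout: MPA timer callback.  Runs in timer context, so it only marks
 * the endpoint and queues a C4IW_EVENT_TIMEOUT event for the work thread.
 */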
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;

	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {

		/*
		 * Only insert if it is not already on the list.
		 */
		if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
			CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
			add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
		}
	}
}
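
/*
 * fw6_wr_rpl: firmware work-request reply.  The completion status is
 * extracted from the first beat of the reply and rpl[1] carries the
 * cookie, i.e. the c4iw_wr_wait to wake up.
 */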
static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
{
	uint64_t val = be64toh(*rpl);
	int ret;
	struct c4iw_wr_wait *wr_waitp;

	ret = (int)((val >> 8) & 0xff);
	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);

	c4iw_wake_up(wr_waitp, ret ? -ret : 0);

	return (0);
}
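
/*
 * fw6_cqe_handler: called from the adapter's interrupt path, so the error
 * CQE is copied onto a list and handed to the workqueue; process_err_cqes()
 * does the real work later.
 */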
static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
{
	struct cqe_list_entry *cle;
	unsigned long flag;

	cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
	if (cle == NULL) {
		/* XXX: nothing to defer to; drop the error CQE rather than
		 * dereference NULL if the M_NOWAIT allocation fails. */
		return (0);
	}
	cle->rhp = sc->iwarp_softc;
	cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);

	spin_lock_irqsave(&err_cqe_lock, flag);
	list_add_tail(&cle->entry, &err_cqe_list);
	queue_work(c4iw_taskq, &c4iw_task);
	spin_unlock_irqrestore(&err_cqe_lock, flag);

	return (0);
}
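
/*
 * process_terminate: the peer sent a TERMINATE; move the associated QP to
 * the TERMINATE state if the endpoint still has one.
 */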
static void
process_terminate(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};

	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
		    ep->hwtid, ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
		    &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
		    ep->hwtid);
	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
}
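
/*
 * c4iw_cm_init: register the CPL/firmware message handlers and set up the
 * request and error-CQE lists plus the workqueue that services them.
 */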
int __init c4iw_cm_init(void)
{
	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
	t4_register_an_handler(c4iw_ev_handler);

	TAILQ_INIT(&req_list);
	spin_lock_init(&req_lock);
	INIT_LIST_HEAD(&err_cqe_list);
	spin_lock_init(&err_cqe_lock);

	INIT_WORK(&c4iw_task, process_req);

	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
	if (!c4iw_taskq)
		return (-ENOMEM);

	return (0);
}
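
/*
 * c4iw_cm_term: undo c4iw_cm_init.  Both lists should be empty by now;
 * drain and destroy the workqueue, then unhook all the handlers.
 */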
void __exit c4iw_cm_term(void)
{
	WARN_ON(!TAILQ_EMPTY(&req_list));
	WARN_ON(!list_empty(&err_cqe_list));
	flush_workqueue(c4iw_taskq);
	destroy_workqueue(c4iw_taskq);

	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
	t4_register_an_handler(NULL);
}