/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct cpl_set_tcb_rpl;
#include <linux/types.h>

#include "tom/t4_tom.h"

#define TOEPCB(so) ((struct toepcb *)(so_sototcpcb((so))->t_toe))

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <rdma/rdma_cm.h>
static spinlock_t req_lock;
static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
static struct work_struct c4iw_task;
static struct workqueue_struct *c4iw_taskq;
static LIST_HEAD(err_cqe_list);
static spinlock_t err_cqe_lock;
static LIST_HEAD(listen_port_list);
static DEFINE_MUTEX(listen_port_mutex);

static void process_req(struct work_struct *ctx);
static void start_ep_timer(struct c4iw_ep *ep);
static int stop_ep_timer(struct c4iw_ep *ep);
static int set_tcpinfo(struct c4iw_ep *ep);
static void process_timeout(struct c4iw_ep *ep);
static void process_err_cqes(void);
static void *alloc_ep(int size, gfp_t flags);
static void close_socket(struct socket *so);
static int send_mpa_req(struct c4iw_ep *ep);
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
static void close_complete_upcall(struct c4iw_ep *ep, int status);
static int send_abort(struct c4iw_ep *ep);
static void peer_close_upcall(struct c4iw_ep *ep);
static void peer_abort_upcall(struct c4iw_ep *ep);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int connect_request_upcall(struct c4iw_ep *ep);
static void established_upcall(struct c4iw_ep *ep);
static int process_mpa_reply(struct c4iw_ep *ep);
static int process_mpa_request(struct c4iw_ep *ep);
static void process_peer_close(struct c4iw_ep *ep);
static void process_conn_error(struct c4iw_ep *ep);
static void process_close_complete(struct c4iw_ep *ep);
static void ep_timeout(unsigned long arg);
static void setiwsockopt(struct socket *so);
static void init_iwarp_socket(struct socket *so, void *arg);
static void uninit_iwarp_socket(struct socket *so);
static void process_data(struct c4iw_ep *ep);
static void process_connected(struct c4iw_ep *ep);
static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
static void process_socket_event(struct c4iw_ep *ep);
static void release_ep_resources(struct c4iw_ep *ep);
static int process_terminate(struct c4iw_ep *ep);
static int terminate(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m);
static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep *lep);
static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep);
static struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so);
static int get_ifnet_from_raddr(struct sockaddr_storage *raddr,
    struct ifnet **ifp);
static void process_newconn(struct c4iw_listen_ep *master_lep,
    struct socket *new_so);
#define START_EP_TIMER(ep) \
    do { \
	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    start_ep_timer(ep); \
    } while (0)

#define STOP_EP_TIMER(ep) \
    ({ \
	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    stop_ep_timer(ep); \
    })
#define GET_LOCAL_ADDR(pladdr, so) \
	do { \
		struct sockaddr_storage *__a = NULL; \
		struct inpcb *__inp = sotoinpcb(so); \
		KASSERT(__inp != NULL, \
		    ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
		if (__inp->inp_vflag & INP_IPV4) \
			in_getsockaddr(so, (struct sockaddr **)&__a); \
		else \
			in6_getsockaddr(so, (struct sockaddr **)&__a); \
		*(pladdr) = *__a; \
		free(__a, M_SONAME); \
	} while (0)

#define GET_REMOTE_ADDR(praddr, so) \
	do { \
		struct sockaddr_storage *__a = NULL; \
		struct inpcb *__inp = sotoinpcb(so); \
		KASSERT(__inp != NULL, \
		    ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
		if (__inp->inp_vflag & INP_IPV4) \
			in_getpeeraddr(so, (struct sockaddr **)&__a); \
		else \
			in6_getpeeraddr(so, (struct sockaddr **)&__a); \
		*(praddr) = *__a; \
		free(__a, M_SONAME); \
	} while (0)
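
/*
 * Usage sketch for the two macros above (illustrative only, compiled out):
 * each macro asks the protocol layer for a kernel-allocated sockaddr,
 * copies it into the caller-provided sockaddr_storage and frees the
 * M_SONAME allocation, so callers never free anything themselves.
 * example_cache_addrs() is a hypothetical helper, not driver code.
 */
#if 0
static void
example_cache_addrs(struct c4iw_ep *ep, struct socket *so)
{

	GET_LOCAL_ADDR(&ep->com.local_addr, so);
	GET_REMOTE_ADDR(&ep->com.remote_addr, so);
}
#endif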
static char *states[] = {

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

/* allocated per TCP port while listening */
struct listen_port_info {
	uint16_t port_num;		/* TCP port address */
	struct list_head list;		/* belongs to listen_port_list */
	struct list_head lep_list;	/* per port lep list */
	uint32_t refcnt;		/* number of lep's listening */
};

/*
 * Following two lists are used to manage INADDR_ANY listeners:
 *
 * Below is the INADDR_ANY listener lists overview on a system with a two
 * port adapter:
 *
 *   |------------------|
 *   | listen_port_list |
 *   |------------------|
 *            |
 *            |    |------------|        |------------|
 *            |--->| port_num:X |--list->| port_num:X |--list->....
 *                 | lep_list   |        | lep_list   |
 *                 | refcnt     |        | refcnt     |
 *                 |------------|        |------------|
 *                       |                     |
 *                  lep1 --> lep2         lep1 --> lep2
 *            (chained through their listen_ep_list members)
 *
 * Because of the two port adapter, the number of leps is two (lep1 & lep2)
 * for each TCP port number.
 *
 * Here 'lep1' is always marked as the Master lep, because solisten() is
 * always called through the first lep.
 */
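
/*
 * A minimal lookup sketch (illustrative only, compiled out) of how the
 * structures above are meant to be traversed: walk listen_port_list to the
 * matching listen_port_info, then walk that port's lep_list, all under
 * listen_port_mutex.  example_lookup_first_lep() is a hypothetical helper;
 * the real producers/consumers are add_ep_to_listenlist(),
 * rem_ep_from_listenlist() and find_real_listen_ep() below.
 */
#if 0
static struct c4iw_listen_ep *
example_lookup_first_lep(uint16_t port)
{
	struct listen_port_info *port_info;
	struct c4iw_listen_ep *lep = NULL;

	mutex_lock(&listen_port_mutex);
	list_for_each_entry(port_info, &listen_port_list, list) {
		if (port_info->port_num != port)
			continue;
		/* First entry on lep_list is the Master lep. */
		if (!list_empty(&port_info->lep_list))
			lep = list_first_entry(&port_info->lep_list,
			    struct c4iw_listen_ep, listen_ep_list);
		break;
	}
	mutex_unlock(&listen_port_mutex);
	return (lep);
}
#endif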
static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep *lep)
{
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;

	port = (laddr->ss_family == AF_INET) ?
	    ((struct sockaddr_in *)laddr)->sin_port :
	    ((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port)

	port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
	port_info->port_num = port;
	port_info->refcnt = 0;

	list_add_tail(&port_info->list, &listen_port_list);
	INIT_LIST_HEAD(&port_info->lep_list);

	list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
	mutex_unlock(&listen_port_mutex);

rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
{
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;

	port = (laddr->ss_family == AF_INET) ?
	    ((struct sockaddr_in *)laddr)->sin_port :
	    ((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	/* get the port_info structure based on the lep's port address */
	list_for_each_entry(port_info, &listen_port_list, list) {
		if (port_info->port_num == port) {

			refcnt = port_info->refcnt;
			/* remove the current lep from the listen list */
			list_del(&lep->listen_ep_list);
			if (port_info->refcnt == 0) {
				/* Remove this entry from the list as there
				 * are no more listeners for this port_num.
				 */
				list_del(&port_info->list);

	mutex_unlock(&listen_port_mutex);

/*
 * Find the lep that belongs to the ifnet on which the SYN frame was received.
 */
struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
{
	struct adapter *adap = NULL;
	struct c4iw_listen_ep *lep = NULL;
	struct ifnet *ifp = NULL, *hw_ifp = NULL;
	struct listen_port_info *port_info = NULL;
	int i = 0, found_portinfo = 0, found_lep = 0;

	/*
	 * STEP 1: Figure out 'ifp' of the physical interface, not pseudo
	 * interfaces like vlan, lagg, etc.
	 * TBD: lagg support, lagg + vlan support.
	 */
	ifp = TOEPCB(so)->l2te->ifp;
	if (ifp->if_type == IFT_L2VLAN) {
		hw_ifp = VLAN_TRUNKDEV(ifp);
		if (hw_ifp == NULL) {
			CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of "
			    "vlan ifnet %p, sock %p, master_lep %p",
			    __func__, ifp, so, master_lep);

	/* STEP 2: Find 'port_info' with listener local port address. */
	port = (master_lep->com.local_addr.ss_family == AF_INET) ?
	    ((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
	    ((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;

	mutex_lock(&listen_port_mutex);
	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port) {

	/* STEP 3: Traverse through list of lep's that are bound to the current
	 * TCP port address and find the lep that belongs to the ifnet on which
	 * the SYN frame was received.
	 */
	list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
		adap = lep->com.dev->rdev.adap;
		for_each_port(adap, i) {
			if (hw_ifp == adap->port[i]->vi[0].ifp) {

	mutex_unlock(&listen_port_mutex);
	return found_lep ? lep : (NULL);
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};

	CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
	    ep, ep->hwtid, ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {

		connect_reply_upcall(ep, -ETIMEDOUT);

		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
			    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);

		close_complete_upcall(ep, -ETIMEDOUT);

		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */

		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u"
		    , __func__, ep, ep->hwtid, ep->com.state);

	c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);

struct cqe_list_entry {
	struct list_head entry;
	struct c4iw_dev *rhp;
	struct t4_cqe err_cqe;
};

process_err_cqes(void)
{
	struct cqe_list_entry *cle;

	spin_lock_irqsave(&err_cqe_lock, flag);
	while (!list_empty(&err_cqe_list)) {
		struct list_head *tmp;
		tmp = err_cqe_list.next;

		tmp->next = tmp->prev = NULL;
		spin_unlock_irqrestore(&err_cqe_lock, flag);
		cle = list_entry(tmp, struct cqe_list_entry, entry);
		c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);

		spin_lock_irqsave(&err_cqe_lock, flag);

	spin_unlock_irqrestore(&err_cqe_lock, flag);

process_req(struct work_struct *ctx)
{
	struct c4iw_ep_common *epc;

	spin_lock_irqsave(&req_lock, flag);
	while (!TAILQ_EMPTY(&req_list)) {
		epc = TAILQ_FIRST(&req_list);
		TAILQ_REMOVE(&req_list, epc, entry);
		epc->entry.tqe_prev = NULL;
		ep_events = epc->ep_events;

		spin_unlock_irqrestore(&req_lock, flag);
		mutex_lock(&epc->mutex);
		CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x",
		    __func__, epc->so, epc, states[epc->state], ep_events);
		if (ep_events & C4IW_EVENT_TERM)
			process_terminate((struct c4iw_ep *)epc);
		if (ep_events & C4IW_EVENT_TIMEOUT)
			process_timeout((struct c4iw_ep *)epc);
		if (ep_events & C4IW_EVENT_SOCKET)
			process_socket_event((struct c4iw_ep *)epc);
		mutex_unlock(&epc->mutex);

		spin_lock_irqsave(&req_lock, flag);

	spin_unlock_irqrestore(&req_lock, flag);

/*
 * XXX: doesn't belong here in the iWARP driver.
 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
 * set.  Is this a valid assumption for active open?
 */
set_tcpinfo(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);

	if ((tp->t_flags & TF_TOE) == 0) {

		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",

	ep->hwtid = toep->tid;
	ep->snd_seq = tp->snd_nxt;
	ep->rcv_seq = tp->rcv_nxt;
get_ifnet_from_raddr(struct sockaddr_storage *raddr, struct ifnet **ifp)
{
	if (raddr->ss_family == AF_INET) {
		struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr;
		struct nhop4_extended nh4 = {0};

		err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, raddr4->sin_addr,

		fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);

		struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr;
		struct nhop6_extended nh6 = {0};
		struct in6_addr addr6;

		memset(&addr6, 0, sizeof(addr6));
		in6_splitscope((struct in6_addr *)&raddr6->sin6_addr,

		err = fib6_lookup_nh_ext(RT_DEFAULT_FIB, &addr6, scopeid,

		fib6_free_nh_ext(RT_DEFAULT_FIB, &nh6);

	CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err);

close_socket(struct socket *so)
{
	uninit_iwarp_socket(so);

process_peer_close(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};

	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	switch (ep->com.state) {

		CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD",

		CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",

		ep->com.state = DEAD;
		connect_reply_upcall(ep, -ECONNABORTED);

		close_socket(ep->com.so);
		deref_cm_id(&ep->com);

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",

		ep->com.state = CLOSING;

		CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",

		ep->com.state = CLOSING;

		CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",

		ep->com.state = CLOSING;
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		c4iw_modify_qp(ep->com.dev, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);

		CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",

		CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",

		ep->com.state = MORIBUND;

		CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,

		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);

		close_socket(ep->com.so);
		close_complete_upcall(ep, 0);
		ep->com.state = DEAD;

		CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",

		panic("%s: ep %p state %d", __func__, ep,

		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
		c4iw_ep_disconnect(ep, 0, M_NOWAIT);

		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
		c4iw_put_ep(&ep->com);

	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
process_conn_error(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};

	state = ep->com.state;
	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
	    __func__, ep, ep->com.so, ep->com.so->so_error,
	    states[ep->com.state]);

		c4iw_put_ep(&ep->parent_ep->com);

		connect_reply_upcall(ep, -ECONNRESET);

		ep->com.rpl_err = ECONNRESET;
		CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);

		if (ep->com.cm_id && ep->com.qp) {

			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
			    ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,

				    "%s - qp <- error failed!\n",

		peer_abort_upcall(ep);

		CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
		    __func__, ep->com.so->so_error);

		panic("%s: ep %p state %d", __func__, ep, state);

	if (state != ABORTING) {
		close_socket(ep->com.so);
		ep->com.state = DEAD;
		c4iw_put_ep(&ep->com);

	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);

process_close_complete(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};

	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	/* The cm_id may be null if we failed to connect */
	set_bit(CLOSE_CON_RPL, &ep->com.history);

	switch (ep->com.state) {

		CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",

		ep->com.state = MORIBUND;

		CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,

		if ((ep->com.cm_id) && (ep->com.qp)) {

			CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",

			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.dev,
			    C4IW_QP_ATTR_NEXT_STATE,

		close_socket(ep->com.so);
		close_complete_upcall(ep, 0);
		ep->com.state = DEAD;

		CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);

		CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);

		CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",

		panic("%s:pcc6 %p unknown ep state", __func__, ep);

		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
		release_ep_resources(ep);

	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);

setiwsockopt(struct socket *so)
{
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;

	rc = -sosetopt(so, &sopt);

		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
init_iwarp_socket(struct socket *so, void *arg)
{
	if (SOLISTENING(so)) {

		solisten_upcall_set(so, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;

		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;
		SOCKBUF_UNLOCK(&so->so_rcv);

uninit_iwarp_socket(struct socket *so)
{
	if (SOLISTENING(so)) {

		solisten_upcall_set(so, NULL, NULL);

		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_clear(so, SO_RCV);
		SOCKBUF_UNLOCK(&so->so_rcv);

process_data(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};

	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));

	switch (ep->com.state) {

		disconnect = process_mpa_reply(ep);

		disconnect = process_mpa_request(ep);

			/* Referred to in process_newconn() */
			c4iw_put_ep(&ep->parent_ep->com);

		MPASS(ep->com.qp != NULL);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -EINPROGRESS)

		log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
		    "state %d, so %p, so_state 0x%x, sbused %u\n",
		    __func__, ep, ep->com.state, ep->com.so,
		    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));

		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);

process_connected(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;

	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
		if (send_mpa_req(ep))

		connect_reply_upcall(ep, -so->so_error);

	ep->com.state = DEAD;
	c4iw_put_ep(&ep->com);
static inline int c4iw_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		    ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));

	ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
	return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
	    ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;

static inline int c4iw_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		    ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));

	return IN6_IS_ADDR_LOOPBACK(
	    &((struct sockaddr_in6 *) addr)->sin6_addr);

static inline int c4iw_any_addr(struct sockaddr *addr)
{
	return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr);
}

process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
{
	struct c4iw_listen_ep *real_lep = NULL;
	struct c4iw_ep *new_ep = NULL;
	struct sockaddr_in *remote = NULL;

	MPASS(new_so != NULL);

	if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr)) {
		/* Here we need to find the 'real_lep' that belongs to the
		 * incoming socket's network interface, such that the newly
		 * created 'ep' can be attached to the real 'lep'.
		 */
		real_lep = find_real_listen_ep(master_lep, new_so);
		if (real_lep == NULL) {
			CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
			    "ep for sock: %p", __func__, new_so);
			log(LOG_ERR,"%s: Could not find the real listen ep for "
			    "sock: %p\n", __func__, new_so);
			/* FIXME: properly free the 'new_so' in failure case.
			 * Use of soabort() and soclose() is not legal here
			 * (before soaccept()).
			 */

	} else /* for a non-wildcard address, master_lep is always the real_lep */
		real_lep = master_lep;

	new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);

	CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
	    "listening so %p, new so %p", __func__, master_lep, real_lep,
	    new_ep, master_lep->com.so, new_so);

	new_ep->com.dev = real_lep->com.dev;
	new_ep->com.so = new_so;
	new_ep->com.cm_id = NULL;
	new_ep->com.thread = real_lep->com.thread;
	new_ep->parent_ep = real_lep;

	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
	c4iw_get_ep(&real_lep->com);
	init_timer(&new_ep->timer);
	new_ep->com.state = MPA_REQ_WAIT;
	START_EP_TIMER(new_ep);

	setiwsockopt(new_so);
	ret = soaccept(new_so, (struct sockaddr **)&remote);

		    "%s:listen sock:%p, new sock:%p, ret:%d",
		    __func__, master_lep->com.so, new_so, ret);

		free(remote, M_SONAME);
		uninit_iwarp_socket(new_so);

		c4iw_put_ep(&new_ep->com);
		c4iw_put_ep(&real_lep->com);

	free(remote, M_SONAME);

	/* MPA request might have been queued up on the socket already, so we
	 * initialize the socket/upcall_handler under lock to prevent processing
	 * MPA request on another thread (via process_req()) simultaneously.
	 */
	c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below, this is to
				      avoid freeing the ep before it is unlocked. */
	mutex_lock(&new_ep->com.mutex);
	init_iwarp_socket(new_so, &new_ep->com);

	ret = process_mpa_request(new_ep);

		c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
		c4iw_put_ep(&real_lep->com);

	mutex_unlock(&new_ep->com.mutex);
	c4iw_put_ep(&new_ep->com);

add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
{
	spin_lock_irqsave(&req_lock, flag);
	if (ep && ep->com.so) {
		ep->com.ep_events |= new_ep_event;
		if (!ep->com.entry.tqe_prev) {
			c4iw_get_ep(&ep->com);
			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
			queue_work(c4iw_taskq, &c4iw_task);

	spin_unlock_irqrestore(&req_lock, flag);
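
/*
 * Added design note: socket upcalls run with socket buffer locks held and
 * must not sleep, so c4iw_so_upcall() below only records the pending event
 * via add_ep_to_req_list() and kicks c4iw_taskq.  The real state handling
 * happens later in process_req() in task context, where taking epc->mutex
 * and calling into the QP code is safe.  The c4iw_get_ep() reference taken
 * when queuing keeps the ep alive while it sits on req_list.
 */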
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
	struct c4iw_ep *ep = arg;

	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
	    __func__, so, so->so_state, ep, states[ep->com.state],
	    ep->com.entry.tqe_prev);

	MPASS(ep->com.so == so);

	 * Wake up any threads waiting in rdma_init()/rdma_fini(),

	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);

terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);

	INP_WLOCK(toep->inp);
	so = inp_inpcbtosocket(toep->inp);
	ep = so->so_rcv.sb_upcallarg;
	INP_WUNLOCK(toep->inp);

	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
	add_ep_to_req_list(ep, C4IW_EVENT_TERM);

process_socket_event(struct c4iw_ep *ep)
{
	int state = ep->com.state;
	struct socket *so = ep->com.so;

	if (ep->com.state == DEAD) {
		CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
		    "ep %p ep_state %s", __func__, ep, states[state]);

	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
	    so->so_error, so->so_rcv.sb_state, ep, states[state]);

	if (state == CONNECTING) {
		process_connected(ep);

	if (state == LISTEN) {
		struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
		struct socket *listen_so = so, *new_so = NULL;

		SOLISTEN_LOCK(listen_so);

			error = solisten_dequeue(listen_so, &new_so,

				CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
				    "error %d", __func__, lep, listen_so,

			process_newconn(lep, new_so);

			/* solisten_dequeue() unlocks on return, so acquire the
			 * lock again for sol_qlen and for the next iteration.
			 */
			SOLISTEN_LOCK(listen_so);
		} while (listen_so->sol_qlen);
		SOLISTEN_UNLOCK(listen_so);

	/* connection error */

		process_conn_error(ep);

	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
		process_peer_close(ep);
		/*
		 * check whether socket disconnect event is pending before
		 * returning.  Fall through if yes.
		 */
		if (!(so->so_state & SS_ISDISCONNECTED))

	/* close complete */
	if (so->so_state & SS_ISDISCONNECTED) {
		process_close_complete(ep);

	if (sbused(&ep->com.so->so_rcv)) {

	/* Socket events for 'MPA Request Received' and 'Close Complete'
	 * were already processed earlier in their respective event handlers.
	 * Hence, these socket events are skipped.
	 * Any other socket events must have been handled above.
	 */
	MPASS((ep->com.state == MPA_REQ_RCVD) || (ep->com.state == MORIBUND));

	if ((ep->com.state != MPA_REQ_RCVD) && (ep->com.state != MORIBUND))
		log(LOG_ERR, "%s: Unprocessed socket event so %p, "
		    "so_state 0x%x, so_err %d, sb_state 0x%x, ep %p, ep_state %s\n",
		    __func__, so, so->so_state, so->so_error, so->so_rcv.sb_state,
SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");

static int dack_mode = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
    "Delayed ack mode (default = 0)");

int c4iw_max_read_depth = 8;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
    "Per-connection max ORD/IRD (default = 8)");

static int enable_tcp_timestamps;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
    "Enable tcp timestamps (default = 0)");

static int enable_tcp_sack;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
    "Enable tcp SACK (default = 0)");

static int enable_tcp_window_scaling = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
    "Enable tcp window scaling (default = 1)");

SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
    "Enable debug logging (default = 0)");

static int peer2peer = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
    "Support peer2peer ULPs (default = 1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
    "RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
    "CM Endpoint operation timeout in seconds (default = 60)");

static int mpa_rev = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
    "MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");

static int markers_enabled;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
    "Enable MPA MARKERS (default(0) = disabled)");

static int crc_enabled = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
    "Enable MPA CRC (default(1) = enabled)");

static int rcv_win = 256 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
    "TCP receive window in bytes (default = 256KB)");

static int snd_win = 128 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
    "TCP send window in bytes (default = 128KB)");

SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, use_dsgl, CTLFLAG_RWTUN, &use_dsgl, 0,
    "Use DSGL for PBL/FastReg (default=1)");

int inline_threshold = 128;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, inline_threshold, CTLFLAG_RWTUN, &inline_threshold, 0,
    "inline vs dsgl threshold (default=128)");

static int reuseaddr = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, reuseaddr, CTLFLAG_RWTUN, &reuseaddr, 0,
    "Enable SO_REUSEADDR & SO_REUSEPORT socket options on all iWARP client connections (default = 0)");
start_ep_timer(struct c4iw_ep *ep)
{
	if (timer_pending(&ep->timer)) {
		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,

	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
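
/*
 * Added commentary on the timer/reference protocol: start_ep_timer() clears
 * the TIMEOUT bit and takes an ep reference on behalf of the running timer.
 * That reference must be dropped exactly once, so whichever of
 * stop_ep_timer() or the timeout path wins the test_and_set_bit(TIMEOUT)
 * race is the side that calls c4iw_put_ep().
 */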
stop_ep_timer(struct c4iw_ep *ep)
{
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);

alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);

	kref_init(&epc->kref);
	mutex_init(&epc->mutex);
	c4iw_init_wr_wait(&epc->wr_wait);

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep_common *epc;

	ep = container_of(kref, struct c4iw_ep, com.kref);

	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",

	if (test_bit(QP_REFERENCED, &ep->com.flags))

	CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx",
	    __func__, ep, epc->history, epc->flags);

static void release_ep_resources(struct c4iw_ep *ep)
{
	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
}

send_mpa_req(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	char mpa_rev_to_use = mpa_rev;

	if (ep->retry_with_mpa_v1)

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);

		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0) |
	    (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;

	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
		    sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_READ_RTR);

		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params),
			    ep->mpa_pkt + sizeof(*mpa), ep->plen);

			memcpy(mpa->private_data,
			    ep->mpa_pkt + sizeof(*mpa), ep->plen);
		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);

		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",

	m_copyback(m, 0, mpalen, (void *)mpa);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,

		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",

	ep->com.state = MPA_REQ_SENT;
	ep->mpa_attr.initiator = 1;
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);

	connect_reply_upcall(ep, err);
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpalen += sizeof(struct mpa_v2_conn_params);
		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
		    ep->mpa_attr.version, mpalen);

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
		    sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
		    (peer2peer ? MPA_V2_PEER2PEER_MODEL :
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
		    FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
		    MPA_V2_RDMA_WRITE_RTR : p2p_type ==
		    FW_RI_INIT_P2PTYPE_READ_REQ ?
		    MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params), pdata, plen);
			CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
			    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);

		memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);

	m_copyback(m, 0, mpalen, (void *)mpa);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);

		ep->snd_seq += mpalen;
	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	struct mpa_message *mpa;

	struct mpa_v2_conn_params mpa_v2_params;

	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
		    ep->mpa_attr.version);
		mpalen += sizeof(struct mpa_v2_conn_params);

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);

	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
		    htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);

		if (peer2peer && (ep->mpa_attr.p2p_type !=
		    FW_RI_INIT_P2PTYPE_DISABLED)) {

			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {

				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,

			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {

				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_READ_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,

		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params), pdata, plen);

		memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);

	m_copyback(m, 0, mpalen, (void *)mpa);

	ep->com.state = MPA_REP_SENT;
	ep->snd_seq += mpalen;
	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,

	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);

	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
send_abort(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct sockopt sopt;

	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
	    states[ep->com.state], ep->hwtid);

	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	sopt.sopt_td = NULL;
	rc = -sosetopt(so, &sopt);

		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",

	uninit_iwarp_socket(so);

	set_bit(ABORT_CONN, &ep->com.history);

	/*
	 * TBD: iw_cxgbe driver should receive ABORT reply for every ABORT
	 * request it has sent. But the current TOE driver is not propagating
	 * this ABORT reply event (via do_abort_rpl) to iw_cxgbe. So as a
	 * work-around de-reference 'ep' here instead of doing it in
	 * abort_rpl() handler (not yet implemented) of iw_cxgbe driver.
	 */
	release_ep_resources(ep);
	ep->com.state = DEAD;
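
/*
 * Illustrative userland sketch (not driver code, compiled out) of the
 * SO_LINGER trick used by send_abort() above: l_onoff = 1 with l_linger = 0
 * makes the subsequent close() send an RST instead of going through the
 * normal FIN handshake.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static void
example_abortive_close(int s)
{
	struct linger l = { .l_onoff = 1, .l_linger = 0 };

	/* A linger time of 0 forces an RST on close(). */
	(void)setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
	(void)close(s);
}
#endif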
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);

	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);

	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
	    -ECONNRESET : status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {

		if (!ep->tried_with_mpa_v1) {

			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
			    sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message) +
			    sizeof(struct mpa_v2_conn_params);

			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
			/* this means MPA_v1 is used */
			event.ord = c4iw_max_read_depth;
			event.ird = c4iw_max_read_depth;
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message);

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
		set_bit(CONN_RPL_UPCALL, &ep->com.history);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status == -ECONNABORTED) {

		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);

		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
		deref_cm_id(&ep->com);

	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
	    ep->tried_with_mpa_v1);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;

	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
		    sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
		    sizeof(struct mpa_v2_conn_params);

		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);

	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,

		CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
		    " IWCM, err:%d", __func__, ep, ret);
		c4iw_put_ep(&ep->com);

		/* Dereference parent_ep only in success case.
		 * In case of failure, parent_ep is dereferenced by the caller
		 * of process_mpa_request().
		 */
		c4iw_put_ep(&ep->parent_ep->com);

	set_bit(CONNREQ_UPCALL, &ep->com.history);

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);

	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);

#define RELAXED_IRD_NEGOTIATION 1
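
/*
 * Added commentary: with RELAXED_IRD_NEGOTIATION set, a mismatch between
 * the peers' IRD/ORD values is settled by shrinking the larger side (see
 * process_mpa_reply() below and c4iw_accept_cr()) rather than failing the
 * connection, provided the adapter's max_ordird_qp limit allows it.
 */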
/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;

	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs = {0};
	enum c4iw_qp_attr_mask mask;

	struct mbuf *top, *m;
	int flags = MSG_DONTWAIT;

	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (STOP_EP_TIMER(ep))

	uio.uio_resid = 1000000;
	uio.uio_td = ep->com.thread;
	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);

	if (err == EWOULDBLOCK) {

		CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);

		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);

	if (ep->com.so->so_rcv.sb_mb) {

		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
		    __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);

		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);

		/*
		 * If we get more than the supported amount of private data
		 * then we must fail this connection.
		 */
		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {

			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
			    ep->mpa_pkt_len + m->m_len);

			goto err_stop_timer;

		/*
		 * copy the new data into our accumulation buffer.
		 */
		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
		ep->mpa_pkt_len += m->m_len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa)) {

	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {

		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
		    mpa->revision, mpa_rev);
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
		    " Received = %d\n", __func__, mpa_rev, mpa->revision);

		goto err_stop_timer;

	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {

		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);

		goto err_stop_timer;

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {

		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);

		goto err_stop_timer;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);

		goto err_stop_timer;

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);

	if (mpa->flags & MPA_REJECT) {

		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
		err = -ECONNREFUSED;
		goto err_stop_timer;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	ep->com.state = FPDU_MODE;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {

		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
		ep->mpa_attr.enhanced_rdma_conn =
		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;

		if (ep->mpa_attr.enhanced_rdma_conn) {

			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
			mpa_v2_params = (struct mpa_v2_conn_params *)
			    (ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
			    MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
			    MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check.  Ideally, the checks below
			 * are not required since the ird/ord negotiation has
			 * been taken care of in c4iw_accept_cr().
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.adap->params.max_ordird_qp)

			} else if (ep->ird > resp_ord) {

			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {

				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_RDMA_WRITE;

				else if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_READ_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_READ_REQ;

		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);

	if (mpa->revision == 1) {

		CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);

			CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
			ep->mpa_attr.p2p_type = p2p_type;

	if (set_tcpinfo(ep)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
		printf("%s set_tcpinfo error\n", __func__);

	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	    ep->mpa_attr.p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state.
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);

		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message.
	 */

		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */

		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",

		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);

	connect_reply_upcall(ep, err);
	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
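
/*
 * A minimal decode sketch (illustrative only, compiled out) of the MPA v2
 * ird/ord wire format parsed above: the low bits covered by
 * MPA_V2_IRD_ORD_MASK carry the IRD/ORD values, while the high bits of
 * 'ird' carry the peer-to-peer capability flag and the high bits of 'ord'
 * carry the RTR type flags.  example_decode_mpa_v2() is a hypothetical
 * helper, not driver code.
 */
#if 0
static void
example_decode_mpa_v2(const struct mpa_v2_conn_params *p)
{
	uint16_t raw_ird = ntohs(p->ird);
	uint16_t raw_ord = ntohs(p->ord);
	uint16_t ird = raw_ird & MPA_V2_IRD_ORD_MASK;
	uint16_t ord = raw_ord & MPA_V2_IRD_ORD_MASK;
	int p2p = (raw_ird & MPA_V2_PEER2PEER_MODEL) != 0;
	int write_rtr = (raw_ord & MPA_V2_RDMA_WRITE_RTR) != 0;
	int read_rtr = (raw_ord & MPA_V2_RDMA_READ_RTR) != 0;

	printf("ird %u ord %u p2p %d write_rtr %d read_rtr %d\n",
	    ird, ord, p2p, write_rtr, read_rtr);
}
#endif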
/*
 * process_mpa_request - process streaming mode MPA request
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
process_mpa_request(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;

	int flags = MSG_DONTWAIT;

	enum c4iw_ep_state state = ep->com.state;

	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);

	if (state != MPA_REQ_WAIT)

	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;

	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */

	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);

		goto err_stop_timer;

	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
	    __func__, ep->com.so));
	ep->mpa_pkt_len += uio.uio_offset;

	/*
	 * If we get more than the supported amount of private data then we
	 * must fail this connection. XXX: check so_rcv->sb_cc, or peek with
	 * another soreceive, or increase the size of mpa_pkt by 1 and abort
	 * if the last byte is filled by the soreceive above.
	 */

	/* Don't even have the MPA message.  Wait for more data to arrive. */
	if (ep->mpa_pkt_len < sizeof(*mpa))

	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
		goto err_stop_timer;

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto err_stop_timer;

	/*
	 * Fail if there's too much private data.
	 */
	plen = ntohs(mpa->private_data_size);
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto err_stop_timer;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto err_stop_timer;

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
			    (ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
			    MPA_V2_IRD_ORD_MASK;
			ep->ird = min_t(u32, ep->ird,
			    cur_max_read_depth(ep->com.dev));
			ep->ord = ntohs(mpa_v2_params->ord) &
			    MPA_V2_IRD_ORD_MASK;
			ep->ord = min_t(u32, ep->ord,
			    cur_max_read_depth(ep->com.dev));
			CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u",
			    __func__, ep->ird, ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)

				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_READ_REQ;

	} else if (mpa->revision == 1 && peer2peer)
		ep->mpa_attr.p2p_type = p2p_type;

	if (set_tcpinfo(ep))
		goto err_stop_timer;

	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	ep->com.state = MPA_REQ_RCVD;

	if (ep->parent_ep->com.state != DEAD)
		if (connect_request_upcall(ep))
2352 * Upcall from the adapter indicating data has been transmitted.
2353 * For us its just the single MPA request or reply. We can now free
2354 * the skb holding the mpa message.
2356 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2359 struct c4iw_ep *ep = to_ep(cm_id);
2362 mutex_lock(&ep->com.mutex);
2363 CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
2365 if ((ep->com.state == DEAD) ||
2366 (ep->com.state != MPA_REQ_RCVD)) {
2368 CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
2369 mutex_unlock(&ep->com.mutex);
2370 c4iw_put_ep(&ep->com);
2371 return -ECONNRESET;
2372 }
2373 set_bit(ULP_REJECT, &ep->com.history);
2377 CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
2382 CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
2383 abort = send_mpa_reject(ep, pdata, pdata_len);
2386 err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2387 mutex_unlock(&ep->com.mutex);
2388 c4iw_put_ep(&ep->com);
2389 CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
2393 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2394 {
2395 int err = 0;
2396 struct c4iw_qp_attributes attrs = {0};
2397 enum c4iw_qp_attr_mask mask;
2398 struct c4iw_ep *ep = to_ep(cm_id);
2399 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2400 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2403 mutex_lock(&ep->com.mutex);
2404 CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
2406 if ((ep->com.state == DEAD) ||
2407 (ep->com.state != MPA_REQ_RCVD)) {
2409 CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
2416 set_bit(ULP_ACCEPT, &ep->com.history);
2418 if ((conn_param->ord > c4iw_max_read_depth) ||
2419 (conn_param->ird > c4iw_max_read_depth)) {
2421 CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
2426 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2428 CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
2430 if (conn_param->ord > ep->ird) {
2431 if (RELAXED_IRD_NEGOTIATION) {
2432 conn_param->ord = ep->ird;
2433 } else {
2434 ep->ird = conn_param->ird;
2435 ep->ord = conn_param->ord;
2436 send_mpa_reject(ep, conn_param->private_data,
2437 conn_param->private_data_len);
2442 if (conn_param->ird < ep->ord) {
2443 if (RELAXED_IRD_NEGOTIATION &&
2444 ep->ord <= h->rdev.adap->params.max_ordird_qp) {
2445 conn_param->ird = ep->ord;
2452 ep->ird = conn_param->ird;
2453 ep->ord = conn_param->ord;
2455 if (ep->mpa_attr.version == 1) {
2456 if (peer2peer && ep->ird == 0)
2457 ep->ird = 1;
2458 } else {
2459 if (peer2peer &&
2460 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
2461 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
2462 ep->ird = 1;
2463 }
2465 CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d", __func__, __LINE__,
2466 ep->ird, ep->ord);
2468 ep->com.cm_id = cm_id;
2469 ref_cm_id(&ep->com);
2472 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2474 /* bind QP to EP and move to RTS */
2475 attrs.mpa_attr = ep->mpa_attr;
2476 attrs.max_ird = ep->ird;
2477 attrs.max_ord = ep->ord;
2478 attrs.llp_stream_handle = ep;
2479 attrs.next_state = C4IW_QP_STATE_RTS;
2481 /* bind QP and TID with INIT_WR */
2482 mask = C4IW_QP_ATTR_NEXT_STATE |
2483 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2484 C4IW_QP_ATTR_MPA_ATTR |
2485 C4IW_QP_ATTR_MAX_IRD |
2486 C4IW_QP_ATTR_MAX_ORD;
2488 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2489 if (err) {
2490 CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err);
2491 goto err_deref_cm_id;
2492 }
2494 err = send_mpa_reply(ep, conn_param->private_data,
2495 conn_param->private_data_len);
2496 if (err) {
2497 CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err);
2498 goto err_deref_cm_id;
2499 }
2501 ep->com.state = FPDU_MODE;
2502 established_upcall(ep);
2503 mutex_unlock(&ep->com.mutex);
2504 c4iw_put_ep(&ep->com);
2505 CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2506 return 0;
2507 err_deref_cm_id:
2508 deref_cm_id(&ep->com);
2513 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2514 mutex_unlock(&ep->com.mutex);
2515 c4iw_put_ep(&ep->com);
2516 CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2520 static int
2521 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
2522 {
2523 int ret;
2524 int on = 1, size;
2525 struct socket *sock = NULL;
2526 struct sockopt sopt;
2528 ret = sock_create_kern(laddr->ss_family,
2529 SOCK_STREAM, IPPROTO_TCP, &sock);
2530 if (ret) {
2531 CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d",
2532 __func__, ret);
2533 return ret;
2534 }
2537 bzero(&sopt, sizeof(struct sockopt));
2538 sopt.sopt_dir = SOPT_SET;
2539 sopt.sopt_level = SOL_SOCKET;
2540 sopt.sopt_name = SO_REUSEADDR;
2542 sopt.sopt_val = &on;
2543 sopt.sopt_valsize = sizeof(on);
2544 ret = -sosetopt(sock, &sopt);
2545 if (ret != 0) {
2546 log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEADDR) "
2547 "failed with %d.\n", __func__, sock, ret);
2548 }
2549 bzero(&sopt, sizeof(struct sockopt));
2550 sopt.sopt_dir = SOPT_SET;
2551 sopt.sopt_level = SOL_SOCKET;
2552 sopt.sopt_name = SO_REUSEPORT;
2554 sopt.sopt_val = &on;
2555 sopt.sopt_valsize = sizeof(on);
2556 ret = -sosetopt(sock, &sopt);
2557 if (ret != 0) {
2558 log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEPORT) "
2559 "failed with %d.\n", __func__, sock, ret);
2560 }
2563 ret = -sobind(sock, (struct sockaddr *)laddr, curthread);
2564 if (ret) {
2565 CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %d",
2566 __func__, ret);
2567 sock_release(sock);
2568 return ret;
2569 }
2571 size = laddr->ss_family == AF_INET6 ?
2572 sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in);
2573 ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0);
2574 if (ret) {
2575 CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %d",
2576 __func__, ret);
2577 sock_release(sock);
2578 return ret;
2579 }
2581 *so = sock;
2582 return 0;
2583 }
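/*
 * Usage sketch (illustrative, mirroring the callers below): both the
 * active and the passive path hand a sockaddr in and get a bound TCP
 * socket back, e.g.
 *
 * err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
 *
 * after which c4iw_connect() drives the socket with soconnect() and
 * c4iw_create_listen() with solisten().
 */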
2585 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2586 {
2587 int err = 0;
2588 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2589 struct c4iw_ep *ep = NULL;
2590 struct ifnet *nh_ifp; /* Logical egress interface */
2592 struct rdma_cm_id *rdma_id = (struct rdma_cm_id*)cm_id->context;
2593 struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
2596 CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2599 if ((conn_param->ord > c4iw_max_read_depth) ||
2600 (conn_param->ird > c4iw_max_read_depth)) {
2602 CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2606 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2607 cm_id->provider_data = ep;
2609 init_timer(&ep->timer);
2610 ep->plen = conn_param->private_data_len;
2614 CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2615 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2616 conn_param->private_data, ep->plen);
2618 ep->ird = conn_param->ird;
2619 ep->ord = conn_param->ord;
2621 if (peer2peer && ep->ord == 0) {
2623 CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2624 ep->ord = 1;
2625 }
2628 ep->com.cm_id = cm_id;
2629 ref_cm_id(&ep->com);
2630 ep->com.qp = get_qhp(dev, conn_param->qpn);
2634 CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2639 ep->com.thread = curthread;
2642 err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp);
2647 CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2648 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2653 if (!(nh_ifp->if_capenable & IFCAP_TOE) ||
2654 TOEDEV(nh_ifp) == NULL) {
2658 ep->com.state = CONNECTING;
2660 ep->com.local_addr = cm_id->local_addr;
2661 ep->com.remote_addr = cm_id->remote_addr;
2663 err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
2667 setiwsockopt(ep->com.so);
2668 init_iwarp_socket(ep->com.so, &ep->com);
2669 err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2670 ep->com.thread);
2673 CTR2(KTR_IW_CXGBE, "%s:ccE, ep %p", __func__, ep);
2674 return 0;
2677 uninit_iwarp_socket(ep->com.so);
2678 ep->com.state = DEAD;
2679 sock_release(ep->com.so);
2681 deref_cm_id(&ep->com);
2682 c4iw_put_ep(&ep->com);
2685 CTR2(KTR_IW_CXGBE, "%s:ccE Error %d", __func__, err);
2689 /*
2690 * iwcm->create_listen. Returns -errno on failure.
2691 */
2692 int
2693 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2694 {
2695 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2696 struct c4iw_listen_ep *lep = NULL;
2697 struct listen_port_info *port_info = NULL;
2700 CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id,
2701 backlog);
2702 lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
2703 lep->com.cm_id = cm_id;
2704 ref_cm_id(&lep->com);
2706 lep->backlog = backlog;
2707 lep->com.local_addr = cm_id->local_addr;
2708 lep->com.thread = curthread;
2709 cm_id->provider_data = lep;
2710 lep->com.state = LISTEN;
2712 /* In case of INADDR_ANY, ibcore creates a cm_id for each device and
2713 * invokes iw_cxgbe listener callbacks assuming that iw_cxgbe creates
2714 * HW listeners for each device separately. But toecore expects a single
2715 * solisten() call with the INADDR_ANY address to create HW listeners on
2716 * all devices for a given port number. So the iw_cxgbe driver calls
2717 * solisten() only once for INADDR_ANY (usually done at the first
2718 * listener callback from ibcore). All subsequent INADDR_ANY listener
2719 * callbacks from ibcore (for the same port) do not invoke solisten(),
2720 * as the first listener callback has already created listeners for all
2721 * other devices (via solisten).
2722 */
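/*
 * Illustrative flow for a wildcard listen on TCP port P (as implemented
 * below): the first lep becomes the "master" and is the only one that
 * owns a real listening socket.
 *
 * lep #1, port P: add_ep_to_listenlist() -> refcnt == 1 -> solisten()
 * lep #2, port P: add_ep_to_listenlist() -> refcnt == 2 -> share the
 * master lep's com.so and skip solisten()
 */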
2723 if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2724 port_info = add_ep_to_listenlist(lep);
2725 /* skip solisten() if refcnt > 1, as the listeners were
2726 * already created by the 'master lep'
2727 */
2728 if (port_info->refcnt > 1) {
2729 /* As there will be only one listener socket for a TCP
2730 * port, copy the master lep's socket pointer to the other leps
2731 * belonging to the same TCP port.
2732 */
2733 struct c4iw_listen_ep *head_lep =
2734 container_of(port_info->lep_list.next,
2735 struct c4iw_listen_ep, listen_ep_list);
2736 lep->com.so = head_lep->com.so;
2740 rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so);
2742 CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d",
2743 __func__, rc);
2747 rc = -solisten(lep->com.so, backlog, curthread);
2749 CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d",
2750 __func__, lep->com.so, rc);
2753 init_iwarp_socket(lep->com.so, &lep->com);
2758 sock_release(lep->com.so);
2761 rem_ep_from_listenlist(lep);
2762 deref_cm_id(&lep->com);
2763 c4iw_put_ep(&lep->com);
2767 int
2768 c4iw_destroy_listen(struct iw_cm_id *cm_id)
2769 {
2770 struct c4iw_listen_ep *lep = to_listen_ep(cm_id);
2772 mutex_lock(&lep->com.mutex);
2773 CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id,
2774 states[lep->com.state]);
2776 lep->com.state = DEAD;
2777 if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2778 /* if no refcount then close listen socket */
2779 if (!rem_ep_from_listenlist(lep))
2780 close_socket(lep->com.so);
2781 } else
2782 close_socket(lep->com.so);
2783 deref_cm_id(&lep->com);
2784 mutex_unlock(&lep->com.mutex);
2785 c4iw_put_ep(&lep->com);
2789 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2790 {
2791 int ret;
2792 mutex_lock(&ep->com.mutex);
2793 ret = c4iw_ep_disconnect(ep, abrupt, gfp);
2794 mutex_unlock(&ep->com.mutex);
2795 return ret;
2796 }
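/*
 * Summary of the two shutdown paths implemented below: an abrupt
 * disconnect moves the ep to ABORTING and issues send_abort(), while a
 * graceful one moves it through CLOSING/MORIBUND and lets sodisconnect()
 * perform an orderly TCP close; if the graceful close fails, the ep is
 * torn down and the QP is moved to C4IW_QP_STATE_ERROR.
 */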
2798 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2799 {
2800 int ret = 0;
2803 struct c4iw_rdev *rdev;
2806 CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2808 rdev = &ep->com.dev->rdev;
2810 if (c4iw_fatal_error(rdev)) {
2812 CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
2814 close_complete_upcall(ep, -ECONNRESET);
2816 ep->com.state = DEAD;
2818 CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2819 states[ep->com.state]);
2821 /*
2822 * Ref the ep here in case we have fatal errors causing the
2823 * ep to be released and freed.
2824 */
2825 c4iw_get_ep(&ep->com);
2826 switch (ep->com.state) {
2835 ep->com.state = ABORTING;
2837 ep->com.state = CLOSING;
2840 set_bit(CLOSE_SENT, &ep->com.flags);
2845 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2850 ep->com.state = ABORTING;
2852 ep->com.state = MORIBUND;
2860 "%s ignoring disconnect ep %p state %u", __func__,
2872 CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2876 CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2877 set_bit(EP_DISC_ABORT, &ep->com.history);
2878 close_complete_upcall(ep, -ECONNRESET);
2879 ret = send_abort(ep);
2884 CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2885 set_bit(EP_DISC_CLOSE, &ep->com.history);
2888 ep->com.state = MORIBUND;
2890 CURVNET_SET(ep->com.so->so_vnet);
2891 sodisconnect(ep->com.so);
2898 set_bit(EP_DISC_FAIL, &ep->com.history);
2901 close_complete_upcall(ep, -EIO);
2904 struct c4iw_qp_attributes attrs = {0};
2906 attrs.next_state = C4IW_QP_STATE_ERROR;
2907 ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
2908 C4IW_QP_ATTR_NEXT_STATE,
2909 &attrs, 1);
2910 if (ret) {
2911 CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep);
2912 printf("%s - qp <- error failed!\n", __func__);
2913 }
2915 release_ep_resources(ep);
2916 ep->com.state = DEAD;
2917 CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2919 c4iw_put_ep(&ep->com);
2920 CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2921 return ret;
2922 }
2924 #ifdef C4IW_EP_REDIRECT
2925 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2926 struct l2t_entry *l2t)
2927 {
2928 struct c4iw_ep *ep = ctx;
2933 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2934 l2t);
2936 cxgb4_l2t_release(ep->l2t);
2945 #endif /* C4IW_EP_REDIRECT */
2946 static void ep_timeout(unsigned long arg)
2947 {
2948 struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2950 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2953 * Only insert if it is not already on the list.
2955 if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
2956 CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2957 add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
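/*
 * Note: no endpoint work happens here in timer context; the ep is only
 * queued, and the C4IW_EVENT_TIMEOUT event is processed later from the
 * c4iw_taskq workqueue (presumably so the handler may block, judging by
 * the workqueue usage elsewhere in this file).
 */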
2962 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2963 {
2964 uint64_t val = be64toh(*rpl);
2965 int ret;
2966 struct c4iw_wr_wait *wr_waitp;
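/*
 * Message layout, as decoded below: rpl[0] carries the completion
 * status in bits 15:8 of the first 64-bit word, and rpl[1] is the
 * cookie posted with the work request, i.e. the address of the waiting
 * c4iw_wr_wait. A non-zero status wakes the waiter with -errno.
 */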
2968 ret = (int)((val >> 8) & 0xff);
2969 wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2970 CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2972 c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2977 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2978 {
2979 struct cqe_list_entry *cle;
2980 unsigned long flag;
2982 cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
if (cle == NULL) /* M_NOWAIT may fail; drop the CQE rather than panic */
return 0;
2983 cle->rhp = sc->iwarp_softc;
2984 cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
2986 spin_lock_irqsave(&err_cqe_lock, flag);
2987 list_add_tail(&cle->entry, &err_cqe_list);
2988 queue_work(c4iw_taskq, &c4iw_task);
2989 spin_unlock_irqrestore(&err_cqe_lock, flag);
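/*
 * Deferral note: this handler runs in the adapter's completion path
 * (hence the M_NOWAIT allocation and irqsave locking), so the error CQE
 * is only queued here; process_err_cqes() performs the actual CQ
 * processing later from the c4iw_taskq workqueue.
 */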
2994 static void
2995 process_terminate(struct c4iw_ep *ep)
2996 {
2997 struct c4iw_qp_attributes attrs = {0};
2999 CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);
3001 if (ep && ep->com.qp) {
3003 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
3004 ep->hwtid, ep->com.qp->wq.sq.qid);
3005 attrs.next_state = C4IW_QP_STATE_TERMINATE;
3006 c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
3007 1);
3008 } else
3009 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
3010 ep->hwtid);
3011 CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
3016 int __init c4iw_cm_init(void)
3017 {
3019 t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
3020 t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
3021 t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
3022 t4_register_an_handler(c4iw_ev_handler);
3024 TAILQ_INIT(&req_list);
3025 spin_lock_init(&req_lock);
3026 INIT_LIST_HEAD(&err_cqe_list);
3027 spin_lock_init(&err_cqe_lock);
3029 INIT_WORK(&c4iw_task, process_req);
3031 c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
3032 if (!c4iw_taskq)
3033 return -ENOMEM;
3035 return 0;
3036 }
3038 void __exit c4iw_cm_term(void)
3039 {
3040 WARN_ON(!TAILQ_EMPTY(&req_list));
3041 WARN_ON(!list_empty(&err_cqe_list));
3042 flush_workqueue(c4iw_taskq);
3043 destroy_workqueue(c4iw_taskq);
3045 t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
3046 t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
3047 t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
3048 t4_register_an_handler(NULL);
3049 }