/*-
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * cxgbei implementation of iSCSI Common Layer kobj(9) interface.
 */
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
40 #include "opt_inet6.h"
43 #include <sys/param.h>
44 #include <sys/capsicum.h>
45 #include <sys/condvar.h>
48 #include <sys/kernel.h>
49 #include <sys/kthread.h>
52 #include <sys/mutex.h>
53 #include <sys/module.h>
54 #include <sys/protosw.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/sysctl.h>
58 #include <sys/systm.h>
61 #include <machine/bus.h>
63 #include <netinet/in.h>
64 #include <netinet/in_pcb.h>
65 #include <netinet/tcp.h>
66 #include <netinet/tcp_var.h>
67 #include <netinet/toecore.h>
69 #include <dev/iscsi/icl.h>
70 #include <dev/iscsi/iscsi_proto.h>
71 #include <icl_conn_if.h>
73 #include "common/common.h"
74 #include "common/t4_tcb.h"
75 #include "tom/t4_tom.h"
78 SYSCTL_NODE(_kern_icl, OID_AUTO, cxgbei, CTLFLAG_RD, 0, "Chelsio iSCSI offload");
79 static int coalesce = 1;
80 SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, coalesce, CTLFLAG_RWTUN,
81 &coalesce, 0, "Try to coalesce PDUs before sending");
82 static int partial_receive_len = 128 * 1024;
83 SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, partial_receive_len, CTLFLAG_RWTUN,
84 &partial_receive_len, 0, "Minimum read size for partially received "
86 static int sendspace = 1048576;
87 SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, sendspace, CTLFLAG_RWTUN,
88 &sendspace, 0, "Default send socket buffer size");
89 static int recvspace = 1048576;
90 SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, recvspace, CTLFLAG_RWTUN,
91 &recvspace, 0, "Default receive socket buffer size");
93 static uma_zone_t icl_transfer_zone;
95 static volatile u_int icl_cxgbei_ncons;
97 #define ICL_CONN_LOCK(X) mtx_lock(X->ic_lock)
98 #define ICL_CONN_UNLOCK(X) mtx_unlock(X->ic_lock)
99 #define ICL_CONN_LOCK_ASSERT(X) mtx_assert(X->ic_lock, MA_OWNED)
100 #define ICL_CONN_LOCK_ASSERT_NOT(X) mtx_assert(X->ic_lock, MA_NOTOWNED)
102 struct icl_pdu *icl_cxgbei_new_pdu(int);
103 void icl_cxgbei_new_pdu_set_conn(struct icl_pdu *, struct icl_conn *);
105 static icl_conn_new_pdu_t icl_cxgbei_conn_new_pdu;
106 icl_conn_pdu_free_t icl_cxgbei_conn_pdu_free;
107 static icl_conn_pdu_data_segment_length_t
108 icl_cxgbei_conn_pdu_data_segment_length;
109 static icl_conn_pdu_append_data_t icl_cxgbei_conn_pdu_append_data;
110 static icl_conn_pdu_get_data_t icl_cxgbei_conn_pdu_get_data;
111 static icl_conn_pdu_queue_t icl_cxgbei_conn_pdu_queue;
112 static icl_conn_handoff_t icl_cxgbei_conn_handoff;
113 static icl_conn_free_t icl_cxgbei_conn_free;
114 static icl_conn_close_t icl_cxgbei_conn_close;
115 static icl_conn_task_setup_t icl_cxgbei_conn_task_setup;
116 static icl_conn_task_done_t icl_cxgbei_conn_task_done;
117 static icl_conn_transfer_setup_t icl_cxgbei_conn_transfer_setup;
118 static icl_conn_transfer_done_t icl_cxgbei_conn_transfer_done;
120 static kobj_method_t icl_cxgbei_methods[] = {
121 KOBJMETHOD(icl_conn_new_pdu, icl_cxgbei_conn_new_pdu),
122 KOBJMETHOD(icl_conn_pdu_free, icl_cxgbei_conn_pdu_free),
123 KOBJMETHOD(icl_conn_pdu_data_segment_length,
124 icl_cxgbei_conn_pdu_data_segment_length),
125 KOBJMETHOD(icl_conn_pdu_append_data, icl_cxgbei_conn_pdu_append_data),
126 KOBJMETHOD(icl_conn_pdu_get_data, icl_cxgbei_conn_pdu_get_data),
127 KOBJMETHOD(icl_conn_pdu_queue, icl_cxgbei_conn_pdu_queue),
128 KOBJMETHOD(icl_conn_handoff, icl_cxgbei_conn_handoff),
129 KOBJMETHOD(icl_conn_free, icl_cxgbei_conn_free),
130 KOBJMETHOD(icl_conn_close, icl_cxgbei_conn_close),
131 KOBJMETHOD(icl_conn_task_setup, icl_cxgbei_conn_task_setup),
132 KOBJMETHOD(icl_conn_task_done, icl_cxgbei_conn_task_done),
133 KOBJMETHOD(icl_conn_transfer_setup, icl_cxgbei_conn_transfer_setup),
134 KOBJMETHOD(icl_conn_transfer_done, icl_cxgbei_conn_transfer_done),
138 DEFINE_CLASS(icl_cxgbei, icl_cxgbei_methods, sizeof(struct icl_cxgbei_conn));
/*
 * Maximum PDU and data segment sizes used on the wire.
 * Subtract another 256 for AHS from MAX_DSL if AHS could be used.
 *
 * NOTE(review): SOURCE shows two pairs of definitions; the first pair is
 * presumed disabled (e.g. inside "#if 0") in the original — confirm against
 * the upstream file before relying on this reconstruction.
 */
#if 0
#define CXGBEI_MAX_PDU 16224
#define CXGBEI_MAX_DSL (CXGBEI_MAX_PDU - sizeof(struct iscsi_bhs) - 8)
#endif
#define CXGBEI_MAX_DSL 8192
#define CXGBEI_MAX_PDU (CXGBEI_MAX_DSL + sizeof(struct iscsi_bhs) + 8)
151 icl_cxgbei_conn_pdu_free(struct icl_conn *ic, struct icl_pdu *ip)
154 struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
157 MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
158 MPASS(ic == ip->ip_conn);
159 MPASS(ip->ip_bhs_mbuf != NULL);
161 m_freem(ip->ip_ahs_mbuf);
162 m_freem(ip->ip_data_mbuf);
163 m_freem(ip->ip_bhs_mbuf); /* storage for icl_cxgbei_pdu itself */
166 if (__predict_true(ic != NULL))
167 refcount_release(&ic->ic_outstanding_pdus);
172 icl_cxgbei_new_pdu(int flags)
174 struct icl_cxgbei_pdu *icp;
179 m = m_gethdr(flags, MT_DATA);
180 if (__predict_false(m == NULL))
183 a = roundup2(mtod(m, uintptr_t), _Alignof(struct icl_cxgbei_pdu));
184 icp = (struct icl_cxgbei_pdu *)a;
185 bzero(icp, sizeof(*icp));
187 icp->icp_signature = CXGBEI_PDU_SIGNATURE;
191 a = roundup2((uintptr_t)(icp + 1), _Alignof(struct iscsi_bhs *));
192 ip->ip_bhs = (struct iscsi_bhs *)a;
194 /* Everything must fit entirely in the mbuf. */
195 a = (uintptr_t)(ip->ip_bhs + 1);
196 MPASS(a <= (uintptr_t)m + MSIZE);
198 bzero(ip->ip_bhs, sizeof(*ip->ip_bhs));
200 m->m_data = (void *)ip->ip_bhs;
201 m->m_len = sizeof(struct iscsi_bhs);
202 m->m_pkthdr.len = m->m_len;
208 icl_cxgbei_new_pdu_set_conn(struct icl_pdu *ip, struct icl_conn *ic)
213 refcount_acquire(&ic->ic_outstanding_pdus);
218 * Allocate icl_pdu with empty BHS to fill up by the caller.
220 static struct icl_pdu *
221 icl_cxgbei_conn_new_pdu(struct icl_conn *ic, int flags)
225 ip = icl_cxgbei_new_pdu(flags);
226 if (__predict_false(ip == NULL))
228 icl_cxgbei_new_pdu_set_conn(ip, ic);
234 icl_pdu_data_segment_length(const struct icl_pdu *request)
238 len += request->ip_bhs->bhs_data_segment_len[0];
240 len += request->ip_bhs->bhs_data_segment_len[1];
242 len += request->ip_bhs->bhs_data_segment_len[2];
248 icl_cxgbei_conn_pdu_data_segment_length(struct icl_conn *ic,
249 const struct icl_pdu *request)
252 return (icl_pdu_data_segment_length(request));
256 icl_conn_build_tasktag(struct icl_conn *ic, uint32_t tag)
262 finalize_pdu(struct icl_cxgbei_conn *icc, struct icl_cxgbei_pdu *icp)
264 struct icl_pdu *ip = &icp->ip;
265 uint8_t ulp_submode, padding;
266 struct mbuf *m, *last;
267 struct iscsi_bhs *bhs;
270 * Fix up the data segment mbuf first.
272 m = ip->ip_data_mbuf;
273 ulp_submode = icc->ulp_submode;
278 * Round up the data segment to a 4B boundary. Pad with 0 if
279 * necessary. There will definitely be room in the mbuf.
281 padding = roundup2(ip->ip_data_len, 4) - ip->ip_data_len;
283 bzero(mtod(last, uint8_t *) + last->m_len, padding);
284 last->m_len += padding;
287 MPASS(ip->ip_data_len == 0);
288 ulp_submode &= ~ULP_CRC_DATA;
293 * Now the header mbuf that has the BHS.
296 MPASS(m->m_pkthdr.len == sizeof(struct iscsi_bhs));
297 MPASS(m->m_len == sizeof(struct iscsi_bhs));
300 bhs->bhs_data_segment_len[2] = ip->ip_data_len;
301 bhs->bhs_data_segment_len[1] = ip->ip_data_len >> 8;
302 bhs->bhs_data_segment_len[0] = ip->ip_data_len >> 16;
304 /* "Convert" PDU to mbuf chain. Do not use icp/ip after this. */
305 m->m_pkthdr.len = sizeof(struct iscsi_bhs) + ip->ip_data_len + padding;
306 m->m_next = ip->ip_data_mbuf;
307 set_mbuf_ulp_submode(m, ulp_submode);
309 bzero(icp, sizeof(*icp));
312 refcount_release(&icc->ic.ic_outstanding_pdus);
319 icl_cxgbei_conn_pdu_append_data(struct icl_conn *ic, struct icl_pdu *ip,
320 const void *addr, size_t len, int flags)
324 struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
327 MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
328 MPASS(ic == ip->ip_conn);
329 KASSERT(len > 0, ("%s: len is %jd", __func__, (intmax_t)len));
331 m = ip->ip_data_mbuf;
333 m = m_getjcl(M_NOWAIT, MT_DATA, 0, MJUM16BYTES);
334 if (__predict_false(m == NULL))
337 ip->ip_data_mbuf = m;
340 if (__predict_true(m_append(m, len, addr) != 0)) {
341 ip->ip_data_len += len;
342 MPASS(ip->ip_data_len <= CXGBEI_MAX_DSL);
345 if (flags & M_WAITOK) {
346 CXGBE_UNIMPLEMENTED("fail safe append");
348 ip->ip_data_len = m_length(m, NULL);
354 icl_cxgbei_conn_pdu_get_data(struct icl_conn *ic, struct icl_pdu *ip,
355 size_t off, void *addr, size_t len)
357 struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
359 if (icp->pdu_flags & SBUF_ULP_FLAG_DATA_DDPED)
360 return; /* data is DDP'ed, no need to copy */
361 m_copydata(ip->ip_data_mbuf, off, len, addr);
365 icl_cxgbei_conn_pdu_queue(struct icl_conn *ic, struct icl_pdu *ip)
367 struct icl_cxgbei_conn *icc = ic_to_icc(ic);
368 struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
369 struct socket *so = ic->ic_socket;
370 struct toepcb *toep = icc->toep;
374 MPASS(ic == ip->ip_conn);
375 MPASS(ip->ip_bhs_mbuf != NULL);
376 /* The kernel doesn't generate PDUs with AHS. */
377 MPASS(ip->ip_ahs_mbuf == NULL && ip->ip_ahs_len == 0);
379 ICL_CONN_LOCK_ASSERT(ic);
380 /* NOTE: sowriteable without so_snd lock is a mostly harmless race. */
381 if (ic->ic_disconnecting || so == NULL || !sowriteable(so)) {
382 icl_cxgbei_conn_pdu_free(ic, ip);
386 m = finalize_pdu(icc, icp);
388 MPASS((m->m_pkthdr.len & 3) == 0);
389 MPASS(m->m_pkthdr.len + 8 <= CXGBEI_MAX_PDU);
392 * Do not get inp from toep->inp as the toepcb might have detached
397 if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) ||
398 __predict_false((toep->flags & TPF_ATTACHED) == 0))
401 mbufq_enqueue(&toep->ulp_pduq, m);
402 t4_push_pdus(icc->sc, toep, 0);
407 static struct icl_conn *
408 icl_cxgbei_new_conn(const char *name, struct mtx *lock)
410 struct icl_cxgbei_conn *icc;
413 refcount_acquire(&icl_cxgbei_ncons);
415 icc = (struct icl_cxgbei_conn *)kobj_create(&icl_cxgbei_class, M_CXGBE,
417 icc->icc_signature = CXGBEI_CONN_SIGNATURE;
418 STAILQ_INIT(&icc->rcvd_pdus);
423 /* XXXNP: review. Most of these icl_conn fields aren't really used */
424 STAILQ_INIT(&ic->ic_to_send);
425 cv_init(&ic->ic_send_cv, "icl_cxgbei_tx");
426 cv_init(&ic->ic_receive_cv, "icl_cxgbei_rx");
428 refcount_init(&ic->ic_outstanding_pdus, 0);
430 ic->ic_max_data_segment_length = CXGBEI_MAX_DSL;
432 ic->ic_offload = "cxgbei";
433 ic->ic_unmapped = false;
435 CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);
441 icl_cxgbei_conn_free(struct icl_conn *ic)
443 struct icl_cxgbei_conn *icc = ic_to_icc(ic);
445 MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
447 CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);
449 cv_destroy(&ic->ic_send_cv);
450 cv_destroy(&ic->ic_receive_cv);
452 kobj_delete((struct kobj *)icc, M_CXGBE);
453 refcount_release(&icl_cxgbei_ncons);
457 icl_cxgbei_setsockopt(struct icl_conn *ic, struct socket *so)
464 * For sendspace, this is required because the current code cannot
465 * send a PDU in pieces; thus, the minimum buffer size is equal
466 * to the maximum PDU size. "+4" is to account for possible padding.
468 * What we should actually do here is to use autoscaling, but set
469 * some minimal buffer size to "minspace". I don't know a way to do
472 minspace = sizeof(struct iscsi_bhs) + ic->ic_max_data_segment_length +
473 ISCSI_HEADER_DIGEST_SIZE + ISCSI_DATA_DIGEST_SIZE + 4;
474 if (sendspace < minspace)
475 sendspace = minspace;
476 if (recvspace < minspace)
477 recvspace = minspace;
479 error = soreserve(so, sendspace, recvspace);
481 icl_cxgbei_conn_close(ic);
484 SOCKBUF_LOCK(&so->so_snd);
485 so->so_snd.sb_flags |= SB_AUTOSIZE;
486 SOCKBUF_UNLOCK(&so->so_snd);
487 SOCKBUF_LOCK(&so->so_rcv);
488 so->so_rcv.sb_flags |= SB_AUTOSIZE;
489 SOCKBUF_UNLOCK(&so->so_rcv);
494 bzero(&opt, sizeof(opt));
495 opt.sopt_dir = SOPT_SET;
496 opt.sopt_level = IPPROTO_TCP;
497 opt.sopt_name = TCP_NODELAY;
499 opt.sopt_valsize = sizeof(one);
500 error = sosetopt(so, &opt);
502 icl_cxgbei_conn_close(ic);
510 * Request/response structure used to find out the adapter offloading a socket.
512 struct find_ofld_adapter_rr {
514 struct adapter *sc; /* result */
518 find_offload_adapter(struct adapter *sc, void *arg)
520 struct find_ofld_adapter_rr *fa = arg;
521 struct socket *so = fa->so;
522 struct tom_data *td = sc->tom_softc;
526 /* Non-TCP were filtered out earlier. */
527 MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);
530 return; /* Found already. */
533 return; /* TOE not enabled on this adapter. */
537 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
539 if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
540 fa->sc = sc; /* Found. */
545 /* XXXNP: move this to t4_tom. */
547 send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
550 struct fw_flowc_wr *flowc;
551 const u_int nparams = 1;
553 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
555 flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
557 wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
560 panic("%s: allocation failure.", __func__);
563 memset(flowc, 0, wr->wr_len);
565 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
566 V_FW_FLOWC_WR_NPARAMS(nparams));
567 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
568 V_FW_WR_FLOWID(toep->tid));
570 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
571 flowc->mnemval[0].val = htobe32(maxlen);
573 txsd->tx_credits = howmany(flowclen, 16);
575 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
576 ("%s: not enough credits (%d)", __func__, toep->tx_credits));
577 toep->tx_credits -= txsd->tx_credits;
578 if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
586 set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, int hcrc, int dcrc)
588 uint64_t val = ULP_MODE_ISCSI;
591 val |= ULP_CRC_HEADER << 4;
593 val |= ULP_CRC_DATA << 4;
595 CTR4(KTR_CXGBE, "%s: tid %u, ULP_MODE_ISCSI, CRC hdr=%d data=%d",
596 __func__, toep->tid, hcrc, dcrc);
598 t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_ULP_TYPE,
599 V_TCB_ULP_TYPE(M_TCB_ULP_TYPE) | V_TCB_ULP_RAW(M_TCB_ULP_RAW), val,
600 0, 0, toep->ofld_rxq->iq.abs_id);
604 * XXXNP: Who is responsible for cleaning up the socket if this returns with an
605 * error? Review all error paths.
607 * XXXNP: What happens to the socket's fd reference if the operation is
608 * successful, and how does that affect the socket's life cycle?
611 icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
613 struct icl_cxgbei_conn *icc = ic_to_icc(ic);
614 struct find_ofld_adapter_rr fa;
623 MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
624 ICL_CONN_LOCK_ASSERT_NOT(ic);
627 * Steal the socket from userland.
629 error = fget(curthread, fd,
630 cap_rights_init(&rights, CAP_SOCK_CLIENT), &fp);
633 if (fp->f_type != DTYPE_SOCKET) {
634 fdrop(fp, curthread);
638 if (so->so_type != SOCK_STREAM ||
639 so->so_proto->pr_protocol != IPPROTO_TCP) {
640 fdrop(fp, curthread);
645 if (ic->ic_socket != NULL) {
647 fdrop(fp, curthread);
650 ic->ic_disconnecting = false;
652 fp->f_ops = &badfileops;
654 fdrop(fp, curthread);
657 /* Find the adapter offloading this socket. */
660 t4_iterate(find_offload_adapter, &fa);
665 error = icl_cxgbei_setsockopt(ic, so);
672 if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))
676 * socket could not have been "unoffloaded" if here.
678 MPASS(tp->t_flags & TF_TOE);
679 MPASS(tp->tod != NULL);
680 MPASS(tp->t_toe != NULL);
682 MPASS(toep->vi->pi->adapter == icc->sc);
684 icc->cwt = cxgbei_select_worker_thread(icc);
685 icc->ulp_submode = 0;
686 if (ic->ic_header_crc32c)
687 icc->ulp_submode |= ULP_CRC_HEADER;
688 if (ic->ic_data_crc32c)
689 icc->ulp_submode |= ULP_CRC_DATA;
690 so->so_options |= SO_NO_DDP;
691 toep->ulp_mode = ULP_MODE_ISCSI;
694 send_iscsi_flowc_wr(icc->sc, toep, CXGBEI_MAX_PDU);
695 set_ulp_mode_iscsi(icc->sc, toep, ic->ic_header_crc32c,
705 icl_cxgbei_conn_close(struct icl_conn *ic)
707 struct icl_cxgbei_conn *icc = ic_to_icc(ic);
712 struct toepcb *toep = icc->toep;
714 MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
715 ICL_CONN_LOCK_ASSERT_NOT(ic);
719 if (ic->ic_disconnecting || so == NULL) {
720 CTR4(KTR_CXGBE, "%s: icc %p (disconnecting = %d), so %p",
721 __func__, icc, ic->ic_disconnecting, so);
725 ic->ic_disconnecting = true;
727 /* These are unused in this driver right now. */
728 MPASS(STAILQ_EMPTY(&ic->ic_to_send));
729 MPASS(ic->ic_receive_pdu == NULL);
732 KASSERT(ic->ic_outstanding_pdus == 0,
733 ("destroying session with %d outstanding PDUs",
734 ic->ic_outstanding_pdus));
738 CTR3(KTR_CXGBE, "%s: tid %d, icc %p", __func__, toep ? toep->tid : -1,
743 if (toep != NULL) { /* NULL if connection was never offloaded. */
745 mbufq_drain(&toep->ulp_pduq);
747 if (icc->rx_flags & RXF_ACTIVE) {
748 volatile u_int *p = &icc->rx_flags;
753 while (*p & RXF_ACTIVE)
760 while (!STAILQ_EMPTY(&icc->rcvd_pdus)) {
761 ip = STAILQ_FIRST(&icc->rcvd_pdus);
762 STAILQ_REMOVE_HEAD(&icc->rcvd_pdus, ip_next);
763 icl_cxgbei_conn_pdu_free(ic, ip);
770 ic->ic_socket = NULL;
774 * XXXNP: we should send RST instead of FIN when PDUs held in various
775 * queues were purged instead of delivered reliably but soabort isn't
776 * really general purpose and wouldn't do the right thing here.
782 icl_cxgbei_conn_task_setup(struct icl_conn *ic, struct icl_pdu *ip,
783 struct ccb_scsiio *csio, uint32_t *task_tagp, void **prvp)
787 *task_tagp = icl_conn_build_tasktag(ic, *task_tagp);
789 prv = uma_zalloc(icl_transfer_zone, M_NOWAIT | M_ZERO);
795 cxgbei_conn_task_reserve_itt(ic, prvp, csio, task_tagp);
801 icl_cxgbei_conn_task_done(struct icl_conn *ic, void *prv)
804 cxgbei_cleanup_task(ic, prv);
805 uma_zfree(icl_transfer_zone, prv);
809 icl_cxgbei_conn_transfer_setup(struct icl_conn *ic, union ctl_io *io,
810 uint32_t *transfer_tag, void **prvp)
814 *transfer_tag = icl_conn_build_tasktag(ic, *transfer_tag);
816 prv = uma_zalloc(icl_transfer_zone, M_NOWAIT | M_ZERO);
822 cxgbei_conn_transfer_reserve_ttt(ic, prvp, io, transfer_tag);
828 icl_cxgbei_conn_transfer_done(struct icl_conn *ic, void *prv)
830 cxgbei_cleanup_task(ic, prv);
831 uma_zfree(icl_transfer_zone, prv);
835 icl_cxgbei_limits(size_t *limitp)
838 *limitp = CXGBEI_MAX_DSL;
844 icl_cxgbei_load(void)
848 icl_transfer_zone = uma_zcreate("icl_transfer",
849 16 * 1024, NULL, NULL, NULL, NULL,
852 refcount_init(&icl_cxgbei_ncons, 0);
854 error = icl_register("cxgbei", false, -100, icl_cxgbei_limits,
855 icl_cxgbei_new_conn);
856 KASSERT(error == 0, ("failed to register"));
862 icl_cxgbei_unload(void)
865 if (icl_cxgbei_ncons != 0)
868 icl_unregister("cxgbei", false);
870 uma_zdestroy(icl_transfer_zone);
876 icl_cxgbei_modevent(module_t mod, int what, void *arg)
881 return (icl_cxgbei_load());
883 return (icl_cxgbei_unload());
889 moduledata_t icl_cxgbei_data = {
895 DECLARE_MODULE(icl_cxgbei, icl_cxgbei_data, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
896 MODULE_DEPEND(icl_cxgbei, icl, 1, 1, 1);
897 MODULE_VERSION(icl_cxgbei, 1);