/*-
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Chelsio Communications, Inc.
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * cxgbei implementation of iSCSI Common Layer kobj(9) interface.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <vm/uma.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#include <dev/iscsi/icl.h>
#include <dev/iscsi/iscsi_proto.h>
#include <icl_conn_if.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_error.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_ioctl.h>

#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_compat.h>
#include <cam/scsi/scsi_message.h>

#include "common/common.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"
#include "cxgbei.h"
SYSCTL_NODE(_kern_icl, OID_AUTO, cxgbei, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Chelsio iSCSI offload");
static int coalesce = 1;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, coalesce, CTLFLAG_RWTUN,
    &coalesce, 0, "Try to coalesce PDUs before sending");
static int partial_receive_len = 128 * 1024;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, partial_receive_len, CTLFLAG_RWTUN,
    &partial_receive_len, 0, "Minimum read size for partially received "
    "PDUs");
static int sendspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, sendspace, CTLFLAG_RWTUN,
    &sendspace, 0, "Default send socket buffer size");
static int recvspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, recvspace, CTLFLAG_RWTUN,
    &recvspace, 0, "Default receive socket buffer size");

static uma_zone_t prsv_zone;
static volatile u_int icl_cxgbei_ncons;
#define ICL_CONN_LOCK(X)		mtx_lock(X->ic_lock)
#define ICL_CONN_UNLOCK(X)		mtx_unlock(X->ic_lock)
#define ICL_CONN_LOCK_ASSERT(X)		mtx_assert(X->ic_lock, MA_OWNED)
#define ICL_CONN_LOCK_ASSERT_NOT(X)	mtx_assert(X->ic_lock, MA_NOTOWNED)
struct icl_pdu *icl_cxgbei_new_pdu(int);
void icl_cxgbei_new_pdu_set_conn(struct icl_pdu *, struct icl_conn *);

static icl_conn_new_pdu_t icl_cxgbei_conn_new_pdu;
icl_conn_pdu_free_t icl_cxgbei_conn_pdu_free;
static icl_conn_pdu_data_segment_length_t
    icl_cxgbei_conn_pdu_data_segment_length;
static icl_conn_pdu_append_data_t icl_cxgbei_conn_pdu_append_data;
static icl_conn_pdu_get_data_t icl_cxgbei_conn_pdu_get_data;
static icl_conn_pdu_queue_t icl_cxgbei_conn_pdu_queue;
static icl_conn_handoff_t icl_cxgbei_conn_handoff;
static icl_conn_free_t icl_cxgbei_conn_free;
static icl_conn_close_t icl_cxgbei_conn_close;
static icl_conn_task_setup_t icl_cxgbei_conn_task_setup;
static icl_conn_task_done_t icl_cxgbei_conn_task_done;
static icl_conn_transfer_setup_t icl_cxgbei_conn_transfer_setup;
static icl_conn_transfer_done_t icl_cxgbei_conn_transfer_done;
static kobj_method_t icl_cxgbei_methods[] = {
	KOBJMETHOD(icl_conn_new_pdu, icl_cxgbei_conn_new_pdu),
	KOBJMETHOD(icl_conn_pdu_free, icl_cxgbei_conn_pdu_free),
	KOBJMETHOD(icl_conn_pdu_data_segment_length,
	    icl_cxgbei_conn_pdu_data_segment_length),
	KOBJMETHOD(icl_conn_pdu_append_data, icl_cxgbei_conn_pdu_append_data),
	KOBJMETHOD(icl_conn_pdu_get_data, icl_cxgbei_conn_pdu_get_data),
	KOBJMETHOD(icl_conn_pdu_queue, icl_cxgbei_conn_pdu_queue),
	KOBJMETHOD(icl_conn_handoff, icl_cxgbei_conn_handoff),
	KOBJMETHOD(icl_conn_free, icl_cxgbei_conn_free),
	KOBJMETHOD(icl_conn_close, icl_cxgbei_conn_close),
	KOBJMETHOD(icl_conn_task_setup, icl_cxgbei_conn_task_setup),
	KOBJMETHOD(icl_conn_task_done, icl_cxgbei_conn_task_done),
	KOBJMETHOD(icl_conn_transfer_setup, icl_cxgbei_conn_transfer_setup),
	KOBJMETHOD(icl_conn_transfer_done, icl_cxgbei_conn_transfer_done),
	KOBJMETHOD_END
};

DEFINE_CLASS(icl_cxgbei, icl_cxgbei_methods, sizeof(struct icl_cxgbei_conn));
void
icl_cxgbei_conn_pdu_free(struct icl_conn *ic, struct icl_pdu *ip)
{
#ifdef INVARIANTS
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
#endif

	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
	MPASS(ic == ip->ip_conn);
	MPASS(ip->ip_bhs_mbuf != NULL);

	m_freem(ip->ip_ahs_mbuf);
	m_freem(ip->ip_data_mbuf);
	m_freem(ip->ip_bhs_mbuf);	/* storage for icl_cxgbei_pdu itself */

	if (__predict_true(ic != NULL))
		refcount_release(&ic->ic_outstanding_pdus);
}
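
/*
 * Added note on the allocation scheme: the icl_cxgbei_pdu bookkeeping
 * structure and the BHS it describes are both carved out of the storage of
 * a single mbuf.  The structure is placed at the first suitably aligned
 * address within the mbuf's data area and the BHS follows it, also aligned.
 * The MPASS against MSIZE below verifies that everything fits.
 */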
struct icl_pdu *
icl_cxgbei_new_pdu(int flags)
{
	struct icl_cxgbei_pdu *icp;
	struct icl_pdu *ip;
	struct mbuf *m;
	uintptr_t a;

	m = m_gethdr(flags, MT_DATA);
	if (__predict_false(m == NULL))
		return (NULL);

	a = roundup2(mtod(m, uintptr_t), _Alignof(struct icl_cxgbei_pdu));
	icp = (struct icl_cxgbei_pdu *)a;
	bzero(icp, sizeof(*icp));

	icp->icp_signature = CXGBEI_PDU_SIGNATURE;
	ip = &icp->ip;
	ip->ip_bhs_mbuf = m;

	a = roundup2((uintptr_t)(icp + 1), _Alignof(struct iscsi_bhs *));
	ip->ip_bhs = (struct iscsi_bhs *)a;

	/* Everything must fit entirely in the mbuf. */
	a = (uintptr_t)(ip->ip_bhs + 1);
	MPASS(a <= (uintptr_t)m + MSIZE);

	bzero(ip->ip_bhs, sizeof(*ip->ip_bhs));

	m->m_data = (void *)ip->ip_bhs;
	m->m_len = sizeof(struct iscsi_bhs);
	m->m_pkthdr.len = m->m_len;

	return (ip);
}
void
icl_cxgbei_new_pdu_set_conn(struct icl_pdu *ip, struct icl_conn *ic)
{

	ip->ip_conn = ic;
	refcount_acquire(&ic->ic_outstanding_pdus);
}
/*
 * Allocate icl_pdu with empty BHS to be filled up by the caller.
 */
static struct icl_pdu *
icl_cxgbei_conn_new_pdu(struct icl_conn *ic, int flags)
{
	struct icl_pdu *ip;

	ip = icl_cxgbei_new_pdu(flags);
	if (__predict_false(ip == NULL))
		return (NULL);
	icl_cxgbei_new_pdu_set_conn(ip, ic);

	return (ip);
}
static uint32_t
icl_pdu_data_segment_length(const struct icl_pdu *request)
{
	uint32_t len = 0;

	len += request->ip_bhs->bhs_data_segment_len[0];
	len <<= 8;
	len += request->ip_bhs->bhs_data_segment_len[1];
	len <<= 8;
	len += request->ip_bhs->bhs_data_segment_len[2];

	return (len);
}

static size_t
icl_cxgbei_conn_pdu_data_segment_length(struct icl_conn *ic,
    const struct icl_pdu *request)
{

	return (icl_pdu_data_segment_length(request));
}
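
/*
 * Added note: finalize_pdu() is the inverse of the helper above.  It pads
 * the data segment to a 4-byte boundary, writes the 24-bit DataSegmentLength
 * into the BHS in big-endian byte order, links the BHS mbuf and the data
 * mbufs into a single packet, and records the ULP submode on the mbuf so
 * the hardware knows whether to insert header and/or data digests.
 */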
static struct mbuf *
finalize_pdu(struct icl_cxgbei_conn *icc, struct icl_cxgbei_pdu *icp)
{
	struct icl_pdu *ip = &icp->ip;
	uint8_t ulp_submode, padding;
	struct mbuf *m, *last;
	struct iscsi_bhs *bhs;

	/*
	 * Fix up the data segment mbuf first.
	 */
	m = ip->ip_data_mbuf;
	ulp_submode = icc->ulp_submode;
	if (m != NULL) {
		last = m_last(m);

		/*
		 * Round up the data segment to a 4B boundary.  Pad with 0 if
		 * necessary.  There will definitely be room in the mbuf.
		 */
		padding = roundup2(ip->ip_data_len, 4) - ip->ip_data_len;
		if (padding != 0) {
			bzero(mtod(last, uint8_t *) + last->m_len, padding);
			last->m_len += padding;
		}
	} else {
		MPASS(ip->ip_data_len == 0);
		ulp_submode &= ~ULP_CRC_DATA;
		padding = 0;
	}

	/*
	 * Now the header mbuf that has the BHS.
	 */
	m = ip->ip_bhs_mbuf;
	MPASS(m->m_pkthdr.len == sizeof(struct iscsi_bhs));
	MPASS(m->m_len == sizeof(struct iscsi_bhs));

	bhs = ip->ip_bhs;
	bhs->bhs_data_segment_len[2] = ip->ip_data_len;
	bhs->bhs_data_segment_len[1] = ip->ip_data_len >> 8;
	bhs->bhs_data_segment_len[0] = ip->ip_data_len >> 16;

	/* "Convert" PDU to mbuf chain.  Do not use icp/ip after this. */
	m->m_pkthdr.len = sizeof(struct iscsi_bhs) + ip->ip_data_len + padding;
	m->m_next = ip->ip_data_mbuf;
	set_mbuf_ulp_submode(m, ulp_submode);
	bzero(icp, sizeof(*icp));
	refcount_release(&icc->ic.ic_outstanding_pdus);

	return (m);
}
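
/*
 * Added note: the first append below allocates a 16KB jumbo cluster for the
 * data segment and m_append() grows the chain as needed after that.  Only
 * the M_NOWAIT path is implemented; an append failure with M_WAITOK set
 * trips CXGBE_UNIMPLEMENTED instead of falling back to a blocking
 * allocation.
 */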
static int
icl_cxgbei_conn_pdu_append_data(struct icl_conn *ic, struct icl_pdu *ip,
    const void *addr, size_t len, int flags)
{
	struct mbuf *m;
#ifdef INVARIANTS
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
#endif

	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
	MPASS(ic == ip->ip_conn);
	KASSERT(len > 0, ("%s: len is %jd", __func__, (intmax_t)len));

	m = ip->ip_data_mbuf;
	if (m == NULL) {
		m = m_getjcl(M_NOWAIT, MT_DATA, 0, MJUM16BYTES);
		if (__predict_false(m == NULL))
			return (ENOMEM);

		ip->ip_data_mbuf = m;
	}

	if (__predict_true(m_append(m, len, addr) != 0)) {
		ip->ip_data_len += len;
		MPASS(ip->ip_data_len <= ic->ic_max_data_segment_length);
		return (0);
	} else {
		if (flags & M_WAITOK) {
			CXGBE_UNIMPLEMENTED("fail safe append");
		}
		ip->ip_data_len = m_length(m, NULL);
		return (1);
	}
}
static void
icl_cxgbei_conn_pdu_get_data(struct icl_conn *ic, struct icl_pdu *ip,
    size_t off, void *addr, size_t len)
{
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

	if (icp->icp_flags & ICPF_RX_DDP)
		return;		/* data is DDP'ed, no need to copy */
	m_copydata(ip->ip_data_mbuf, off, len, addr);
}
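
/*
 * Added note: queued PDUs never go through the normal socket send path.
 * The finalized mbuf chain is placed on the toepcb's ulp_pduq and handed
 * to the TOE with t4_push_pdus(); PDUs queued on a disconnecting or
 * unwritable connection are simply freed.
 */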
static void
icl_cxgbei_conn_pdu_queue(struct icl_conn *ic, struct icl_pdu *ip)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
	struct socket *so = ic->ic_socket;
	struct toepcb *toep = icc->toep;
	struct inpcb *inp;
	struct mbuf *m;

	MPASS(ic == ip->ip_conn);
	MPASS(ip->ip_bhs_mbuf != NULL);
	/* The kernel doesn't generate PDUs with AHS. */
	MPASS(ip->ip_ahs_mbuf == NULL && ip->ip_ahs_len == 0);

	ICL_CONN_LOCK_ASSERT(ic);
	/* NOTE: sowriteable without so_snd lock is a mostly harmless race. */
	if (ic->ic_disconnecting || so == NULL || !sowriteable(so)) {
		icl_cxgbei_conn_pdu_free(ic, ip);
		return;
	}

	m = finalize_pdu(icc, icp);
	MPASS((m->m_pkthdr.len & 3) == 0);

	/*
	 * Do not get inp from toep->inp as the toepcb might have detached
	 * already.
	 */
	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) ||
	    __predict_false((toep->flags & TPF_ATTACHED) == 0))
		m_freem(m);
	else {
		mbufq_enqueue(&toep->ulp_pduq, m);
		t4_push_pdus(icc->sc, toep, 0);
	}
	INP_WUNLOCK(inp);
}
static struct icl_conn *
icl_cxgbei_new_conn(const char *name, struct mtx *lock)
{
	struct icl_cxgbei_conn *icc;
	struct icl_conn *ic;

	refcount_acquire(&icl_cxgbei_ncons);

	icc = (struct icl_cxgbei_conn *)kobj_create(&icl_cxgbei_class, M_CXGBE,
	    M_WAITOK | M_ZERO);
	icc->icc_signature = CXGBEI_CONN_SIGNATURE;
	STAILQ_INIT(&icc->rcvd_pdus);

	ic = &icc->ic;
	ic->ic_lock = lock;

	/* XXXNP: review.  Most of these icl_conn fields aren't really used */
	STAILQ_INIT(&ic->ic_to_send);
	cv_init(&ic->ic_send_cv, "icl_cxgbei_tx");
	cv_init(&ic->ic_receive_cv, "icl_cxgbei_rx");
	refcount_init(&ic->ic_outstanding_pdus, 0);

	/* This is a stop-gap value that will be corrected during handoff. */
	ic->ic_max_data_segment_length = 16384;
	ic->ic_name = name;
	ic->ic_offload = "cxgbei";
	ic->ic_unmapped = false;

	CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

	return (ic);
}
static void
icl_cxgbei_conn_free(struct icl_conn *ic)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);

	CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

	cv_destroy(&ic->ic_send_cv);
	cv_destroy(&ic->ic_receive_cv);

	kobj_delete((struct kobj *)icc, M_CXGBE);
	refcount_release(&icl_cxgbei_ncons);
}
static int
icl_cxgbei_setsockopt(struct icl_conn *ic, struct socket *so, int sspace,
    int rspace)
{
	struct sockopt opt;
	int error, one = 1, ss, rs;

	ss = max(sendspace, sspace);
	rs = max(recvspace, rspace);
	error = soreserve(so, ss, rs);
	if (error != 0) {
		icl_cxgbei_conn_close(ic);
		return (error);
	}
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_AUTOSIZE;
	SOCKBUF_UNLOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_flags |= SB_AUTOSIZE;
	SOCKBUF_UNLOCK(&so->so_rcv);

	/* Disable Nagle. */
	bzero(&opt, sizeof(opt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = IPPROTO_TCP;
	opt.sopt_name = TCP_NODELAY;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	error = sosetopt(so, &opt);
	if (error != 0) {
		icl_cxgbei_conn_close(ic);
		return (error);
	}

	return (0);
}
/*
 * Request/response structure used to find out the adapter offloading a socket.
 */
struct find_ofld_adapter_rr {
	struct socket *so;
	struct adapter *sc;	/* result */
};

static void
find_offload_adapter(struct adapter *sc, void *arg)
{
	struct find_ofld_adapter_rr *fa = arg;
	struct socket *so = fa->so;
	struct tom_data *td = sc->tom_softc;
	struct tcpcb *tp;
	struct inpcb *inp;

	/* Non-TCP were filtered out earlier. */
	MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);

	if (fa->sc != NULL)
		return;	/* Found already. */

	if (td == NULL)
		return;	/* TOE not enabled on this adapter. */

	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		tp = intotcpcb(inp);
		if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
			fa->sc = sc;	/* Found. */
	}
	INP_WUNLOCK(inp);
}
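
/*
 * Added note: the FLOWC work request below carries a single parameter
 * (FW_FLOWC_MNEM_TXDATAPLEN_MAX) that tells the firmware the maximum TX
 * data length to use for this tid.  Like any offload work request it
 * consumes tx credits and a software tx descriptor, both of which are
 * accounted for before the request is handed to the queue.
 */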
/* XXXNP: move this to t4_tom. */
static void
send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	const u_int nparams = 1;
	u_int flowclen;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[0].val = htobe32(maxlen);

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	t4_wrq_tx(sc, wr);
}
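
/*
 * Added note: the function below switches the connection's TCB into iSCSI
 * ULP mode.  The ULP type and the raw submode bits (header/data CRC
 * offload) are written into the TCB with a SET_TCB_FIELD work request on
 * the control queue.
 */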
static void
set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, int hcrc, int dcrc)
{
	uint64_t val = ULP_MODE_ISCSI;

	if (hcrc)
		val |= ULP_CRC_HEADER << 4;
	if (dcrc)
		val |= ULP_CRC_DATA << 4;

	CTR4(KTR_CXGBE, "%s: tid %u, ULP_MODE_ISCSI, CRC hdr=%d data=%d",
	    __func__, toep->tid, hcrc, dcrc);

	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_ULP_TYPE,
	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE) | V_TCB_ULP_RAW(M_TCB_ULP_RAW), val,
	    0, 0);
}
/*
 * XXXNP: Who is responsible for cleaning up the socket if this returns with an
 * error?  Review all error paths.
 *
 * XXXNP: What happens to the socket's fd reference if the operation is
 * successful, and how does that affect the socket's life cycle?
 */
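
/*
 * Added note on the handoff sequence: steal the socket from userland,
 * locate the adapter offloading it, send the FLOWC work request and switch
 * the tid to iSCSI ULP mode, then size the socket buffers for full-sized
 * PDUs.
 */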
static int
icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct cxgbei_data *ci;
	struct find_ofld_adapter_rr fa;
	struct file *fp;
	struct socket *so;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct toepcb *toep;
	cap_rights_t rights;
	int error;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ICL_CONN_LOCK_ASSERT_NOT(ic);

	/*
	 * Steal the socket from userland.
	 */
	error = fget(curthread, fd,
	    cap_rights_init(&rights, CAP_SOCK_CLIENT), &fp);
	if (error != 0)
		return (error);
	if (fp->f_type != DTYPE_SOCKET) {
		fdrop(fp, curthread);
		return (EINVAL);
	}
	so = fp->f_data;
	if (so->so_type != SOCK_STREAM ||
	    so->so_proto->pr_protocol != IPPROTO_TCP) {
		fdrop(fp, curthread);
		return (EINVAL);
	}

	ICL_CONN_LOCK(ic);
	if (ic->ic_socket != NULL) {
		ICL_CONN_UNLOCK(ic);
		fdrop(fp, curthread);
		return (EBUSY);
	}
	ic->ic_disconnecting = false;
	ic->ic_socket = so;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	ICL_CONN_UNLOCK(ic);
	fdrop(fp, curthread);

	/* Find the adapter offloading this socket. */
	fa.sc = NULL;
	fa.so = so;
	t4_iterate(find_offload_adapter, &fa);
	if (fa.sc == NULL)
		return (EINVAL);
	icc->sc = fa.sc;
	ci = icc->sc->iscsi_ulp_softc;

	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))
		error = EBUSY;
	else {
		/*
		 * socket could not have been "unoffloaded" if here.
		 */
		MPASS(tp->t_flags & TF_TOE);
		MPASS(tp->tod != NULL);
		MPASS(tp->t_toe != NULL);
		toep = tp->t_toe;
		MPASS(toep->vi->pi->adapter == icc->sc);
		icc->toep = toep;
		icc->cwt = cxgbei_select_worker_thread(icc);

		/*
		 * We maintain the _send_ DSL in this field just to have a
		 * convenient way to assert that the kernel never sends
		 * oversized PDUs.  This field is otherwise unused in the
		 * driver or the kernel.
		 */
		ic->ic_max_data_segment_length = ci->max_tx_pdu_len -
		    ISCSI_BHS_SIZE;

		icc->ulp_submode = 0;
		if (ic->ic_header_crc32c) {
			icc->ulp_submode |= ULP_CRC_HEADER;
			ic->ic_max_data_segment_length -=
			    ISCSI_HEADER_DIGEST_SIZE;
		}
		if (ic->ic_data_crc32c) {
			icc->ulp_submode |= ULP_CRC_DATA;
			ic->ic_max_data_segment_length -=
			    ISCSI_DATA_DIGEST_SIZE;
		}

		so->so_options |= SO_NO_DDP;
		toep->params.ulp_mode = ULP_MODE_ISCSI;
		toep->ulpcb = icc;

		send_iscsi_flowc_wr(icc->sc, toep, ci->max_tx_pdu_len);
		set_ulp_mode_iscsi(icc->sc, toep, ic->ic_header_crc32c,
		    ic->ic_data_crc32c);
		error = 0;
	}
	INP_WUNLOCK(inp);

	if (error == 0) {
		error = icl_cxgbei_setsockopt(ic, so, ci->max_tx_pdu_len,
		    ci->max_rx_pdu_len);
	}

	return (error);
}
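
/*
 * Added note on teardown: close drains any PDUs still queued for the TOE,
 * spins until the receive worker goes idle (RXF_ACTIVE clears), frees PDUs
 * that were received but never claimed by the upper layers, and only then
 * closes the socket.
 */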
static void
icl_cxgbei_conn_close(struct icl_conn *ic)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct icl_pdu *ip;
	struct socket *so;
	struct sockbuf *sb;
	struct inpcb *inp;
	struct toepcb *toep = icc->toep;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ICL_CONN_LOCK_ASSERT_NOT(ic);

	ICL_CONN_LOCK(ic);
	so = ic->ic_socket;
	if (ic->ic_disconnecting || so == NULL) {
		CTR4(KTR_CXGBE, "%s: icc %p (disconnecting = %d), so %p",
		    __func__, icc, ic->ic_disconnecting, so);
		ICL_CONN_UNLOCK(ic);
		return;
	}
	ic->ic_disconnecting = true;

	/* These are unused in this driver right now. */
	MPASS(STAILQ_EMPTY(&ic->ic_to_send));
	MPASS(ic->ic_receive_pdu == NULL);

	KASSERT(ic->ic_outstanding_pdus == 0,
	    ("destroying session with %d outstanding PDUs",
	    ic->ic_outstanding_pdus));
	ICL_CONN_UNLOCK(ic);

	CTR3(KTR_CXGBE, "%s: tid %d, icc %p", __func__, toep ? toep->tid : -1,
	    icc);

	inp = sotoinpcb(so);
	sb = &so->so_rcv;
	INP_WLOCK(inp);
	if (toep != NULL) {	/* NULL if connection was never offloaded. */
		toep->ulpcb = NULL;
		mbufq_drain(&toep->ulp_pduq);

		SOCKBUF_LOCK(sb);
		if (icc->rx_flags & RXF_ACTIVE) {
			volatile u_int *p = &icc->rx_flags;

			SOCKBUF_UNLOCK(sb);
			INP_WUNLOCK(inp);

			while (*p & RXF_ACTIVE)
				pause("conclo", 1);

			INP_WLOCK(inp);
			SOCKBUF_LOCK(sb);
		}

		while (!STAILQ_EMPTY(&icc->rcvd_pdus)) {
			ip = STAILQ_FIRST(&icc->rcvd_pdus);
			STAILQ_REMOVE_HEAD(&icc->rcvd_pdus, ip_next);
			icl_cxgbei_conn_pdu_free(ic, ip);
		}
		SOCKBUF_UNLOCK(sb);
	}
	INP_WUNLOCK(inp);

	ICL_CONN_LOCK(ic);
	ic->ic_socket = NULL;
	ICL_CONN_UNLOCK(ic);

	/*
	 * XXXNP: we should send RST instead of FIN when PDUs held in various
	 * queues were purged instead of delivered reliably but soabort isn't
	 * really general purpose and wouldn't do the right thing here.
	 */
	soclose(so);
}
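
/*
 * Added note on DDP tagging: DDP is set up only for CAM_DIR_IN transfers at
 * least ddp_threshold bytes long.  Qualifying I/Os get page pods reserved
 * and written to the card, and the reservation's tag becomes the ITT.
 * Everything else gets a tag with the region's invalid bit set, which the
 * hardware will never interpret as a DDP tag.
 */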
static int
icl_cxgbei_conn_task_setup(struct icl_conn *ic, struct icl_pdu *ip,
    struct ccb_scsiio *csio, uint32_t *ittp, void **arg)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct toepcb *toep = icc->toep;
	struct adapter *sc = icc->sc;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct ppod_region *pr = &ci->pr;
	struct ppod_reservation *prsv;
	uint32_t itt;
	int rc = 0;

	/* This is for the offload driver's state.  Must not be set already. */
	MPASS(arg != NULL);
	MPASS(*arg == NULL);

	if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN ||
	    csio->dxfer_len < ci->ddp_threshold) {
no_ddp:
		/*
		 * No DDP for this I/O.  Allocate an ITT (based on the one
		 * passed in) that cannot be a valid hardware DDP tag in the
		 * iSCSI region.
		 */
		itt = *ittp & M_PPOD_TAG;
		itt = V_PPOD_TAG(itt) | pr->pr_invalid_bit;
		*ittp = htobe32(itt);
		MPASS(*arg == NULL);	/* State is maintained for DDP only. */
		if (rc != 0)
			counter_u64_add(ci->ddp_setup_error, 1);
		return (0);
	}

	/*
	 * Reserve resources for DDP, update the itt that should be used in the
	 * PDU, and save DDP specific state for this I/O in *arg.
	 */
	prsv = uma_zalloc(prsv_zone, M_NOWAIT);
	if (prsv == NULL) {
		rc = ENOMEM;
		goto no_ddp;
	}

	/* XXX add support for all CAM_DATA_ types */
	MPASS((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR);
	rc = t4_alloc_page_pods_for_buf(pr, (vm_offset_t)csio->data_ptr,
	    csio->dxfer_len, prsv);
	if (rc != 0) {
		uma_zfree(prsv_zone, prsv);
		goto no_ddp;
	}

	rc = t4_write_page_pods_for_buf(sc, toep->ofld_txq, toep->tid, prsv,
	    (vm_offset_t)csio->data_ptr, csio->dxfer_len);
	if (rc != 0) {
		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
		goto no_ddp;
	}

	*ittp = htobe32(prsv->prsv_tag);
	*arg = prsv;
	counter_u64_add(ci->ddp_setup_ok, 1);
	return (0);
}
static void
icl_cxgbei_conn_task_done(struct icl_conn *ic, void *arg)
{

	if (arg != NULL) {
		struct ppod_reservation *prsv = arg;

		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
	}
}
/* XXXNP: PDU should be passed in as parameter, like on the initiator. */
#define io_to_request_pdu(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr)
#define io_to_ppod_reservation(io) \
	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr)
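
/*
 * Added note on target-side (CTL) transfer setup: the request PDU and the
 * page pod reservation for an I/O are stashed in the io_hdr's private
 * pointers via the macros above.  The first call for an I/O reserves and
 * writes the page pods; subsequent calls for the same I/O reuse them and
 * just bump the tag's alias bits so each TTT handed out is distinct.
 */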
static int
icl_cxgbei_conn_transfer_setup(struct icl_conn *ic, union ctl_io *io,
    uint32_t *tttp, void **arg)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct toepcb *toep = icc->toep;
	struct ctl_scsiio *ctsio = &io->scsiio;
	struct adapter *sc = icc->sc;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct ppod_region *pr = &ci->pr;
	struct ppod_reservation *prsv;
	uint32_t ttt;
	int xferlen, rc = 0, alias;

	/* This is for the offload driver's state.  Must not be set already. */
	MPASS(arg != NULL);
	MPASS(*arg == NULL);

	if (ctsio->ext_data_filled == 0) {
		int first_burst;
		struct icl_pdu *ip = io_to_request_pdu(io);
		vm_offset_t buf;
#ifdef INVARIANTS
		struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

		MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
		MPASS(ic == ip->ip_conn);
		MPASS(ip->ip_bhs_mbuf != NULL);
#endif
		first_burst = icl_pdu_data_segment_length(ip);

		/*
		 * Note that ICL calls conn_transfer_setup even if the first
		 * burst had everything and there's nothing left to transfer.
		 */
		MPASS(ctsio->kern_data_len >= first_burst);
		xferlen = ctsio->kern_data_len;
		if (xferlen - first_burst < ci->ddp_threshold) {
no_ddp:
			/*
			 * No DDP for this transfer.  Allocate a TTT (based on
			 * the one passed in) that cannot be a valid hardware
			 * DDP tag in the iSCSI region.
			 */
			ttt = *tttp & M_PPOD_TAG;
			ttt = V_PPOD_TAG(ttt) | pr->pr_invalid_bit;
			*tttp = htobe32(ttt);
			MPASS(io_to_ppod_reservation(io) == NULL);
			if (rc != 0)
				counter_u64_add(ci->ddp_setup_error, 1);
			return (0);
		}

		if (ctsio->kern_sg_entries == 0)
			buf = (vm_offset_t)ctsio->kern_data_ptr;
		else if (ctsio->kern_sg_entries == 1) {
			struct ctl_sg_entry *sgl = (void *)ctsio->kern_data_ptr;

			MPASS(sgl->len == xferlen);
			buf = (vm_offset_t)sgl->addr;
		} else {
			rc = EAGAIN;	/* XXX implement */
			goto no_ddp;
		}

		/*
		 * Reserve resources for DDP, update the ttt that should be
		 * used in the PDU, and save DDP specific state for this I/O.
		 */
		MPASS(io_to_ppod_reservation(io) == NULL);
		prsv = uma_zalloc(prsv_zone, M_NOWAIT);
		if (prsv == NULL) {
			rc = ENOMEM;
			goto no_ddp;
		}

		rc = t4_alloc_page_pods_for_buf(pr, buf, xferlen, prsv);
		if (rc != 0) {
			uma_zfree(prsv_zone, prsv);
			goto no_ddp;
		}

		rc = t4_write_page_pods_for_buf(sc, toep->ofld_txq, toep->tid,
		    prsv, buf, xferlen);
		if (rc != 0) {
			t4_free_page_pods(prsv);
			uma_zfree(prsv_zone, prsv);
			goto no_ddp;
		}

		*tttp = htobe32(prsv->prsv_tag);
		io_to_ppod_reservation(io) = prsv;
		*arg = ctsio;
		counter_u64_add(ci->ddp_setup_ok, 1);
		return (0);
	}

	/*
	 * In the middle of an I/O.  A non-NULL page pod reservation indicates
	 * that a DDP buffer is being used for the I/O.
	 */
	prsv = io_to_ppod_reservation(ctsio);
	if (prsv == NULL)
		goto no_ddp;

	/* Bump the alias so the new tag differs from the earlier ones. */
	alias = (prsv->prsv_tag & pr->pr_alias_mask) >> pr->pr_alias_shift;
	alias++;
	prsv->prsv_tag &= ~pr->pr_alias_mask;
	prsv->prsv_tag |= alias << pr->pr_alias_shift & pr->pr_alias_mask;
	*arg = ctsio;
	*tttp = htobe32(prsv->prsv_tag);

	return (0);
}
static void
icl_cxgbei_conn_transfer_done(struct icl_conn *ic, void *arg)
{
	struct ctl_scsiio *ctsio = arg;

	if (ctsio != NULL && ctsio->kern_data_len == ctsio->ext_data_filled) {
		struct ppod_reservation *prsv;

		prsv = io_to_ppod_reservation(ctsio);
		MPASS(prsv != NULL);

		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
	}
}
static void
cxgbei_limits(struct adapter *sc, void *arg)
{
	struct icl_drv_limits *idl = arg;
	struct cxgbei_data *ci;
	int max_dsl;

	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4lims") != 0)
		return;

	if (uld_active(sc, ULD_ISCSI)) {
		ci = sc->iscsi_ulp_softc;

		/*
		 * AHS is not supported by the kernel so we'll not account for
		 * it either in our PDU len -> data segment len conversions.
		 */

		max_dsl = ci->max_rx_pdu_len - ISCSI_BHS_SIZE -
		    ISCSI_HEADER_DIGEST_SIZE - ISCSI_DATA_DIGEST_SIZE;
		if (idl->idl_max_recv_data_segment_length > max_dsl)
			idl->idl_max_recv_data_segment_length = max_dsl;

		max_dsl = ci->max_tx_pdu_len - ISCSI_BHS_SIZE -
		    ISCSI_HEADER_DIGEST_SIZE - ISCSI_DATA_DIGEST_SIZE;
		if (idl->idl_max_send_data_segment_length > max_dsl)
			idl->idl_max_send_data_segment_length = max_dsl;
	}

	end_synchronized_op(sc, LOCK_HELD);
}
static int
icl_cxgbei_limits(struct icl_drv_limits *idl)
{

	/* Maximum allowed by the RFC.  cxgbei_limits will clip them. */
	idl->idl_max_recv_data_segment_length = (1 << 24) - 1;
	idl->idl_max_send_data_segment_length = (1 << 24) - 1;

	/* These are somewhat arbitrary. */
	idl->idl_max_burst_length = 2 * 1024 * 1024;
	idl->idl_first_burst_length = 8192;

	t4_iterate(cxgbei_limits, idl);

	return (0);
}
static int
icl_cxgbei_mod_load(void)
{
	int rc;

	/*
	 * Space to track pagepod reservations.
	 */
	prsv_zone = uma_zcreate("Pagepod reservations",
	    sizeof(struct ppod_reservation), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);

	refcount_init(&icl_cxgbei_ncons, 0);

	rc = icl_register("cxgbei", false, -100, icl_cxgbei_limits,
	    icl_cxgbei_new_conn);

	return (rc);
}
static int
icl_cxgbei_mod_unload(void)
{

	if (icl_cxgbei_ncons != 0)
		return (EBUSY);

	icl_unregister("cxgbei", false);

	uma_zdestroy(prsv_zone);

	return (0);
}