/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/aio.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sglist.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

static void	t4_aiotx_cancel(struct kaiocb *job);
static void	t4_aiotx_queue_toep(struct socket *so, struct toepcb *toep);
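
/*
 * An aiotx mbuf's ext_arg1 points at the backing aiotx_buffer (a held page
 * array plus offset/length) and ext_arg2 carries the mbuf's byte offset into
 * that buffer.  The helpers below turn that pair into a starting page and a
 * page-relative offset for sglist construction.
 */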
static size_t
aiotx_mbuf_pgoff(struct mbuf *m)
{
	struct aiotx_buffer *ab;

	MPASS(IS_AIOTX_MBUF(m));
	ab = m->m_ext.ext_arg1;
	return ((ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) % PAGE_SIZE);
}

static vm_page_t *
aiotx_mbuf_pages(struct mbuf *m)
{
	struct aiotx_buffer *ab;
	int npages;

	MPASS(IS_AIOTX_MBUF(m));
	ab = m->m_ext.ext_arg1;
	npages = (ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) / PAGE_SIZE;
	return (ab->ps.pages + npages);
}
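
/*
 * The flowc WR programs a connection's parameters (PF/VF, channel, port,
 * ingress queue, and optionally initial send/receive state) into the
 * firmware.  It must be the first WR sent on a tid; TPF_FLOWC_WR_SENT
 * tracks this.
 */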
void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	unsigned int nparams, flowclen, paramidx;
	struct vi_info *vi = toep->vi;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	unsigned int pfvf = sc->pf << S_FW_VIID_PFN;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
	    ("%s: flowc for tid %u sent already", __func__, toep->tid));

	if (ftxp != NULL)
		nparams = 8;
	else
		nparams = 6;
	if (toep->ulp_mode == ULP_MODE_TLS)
		nparams++;
	if (toep->tls.fcplenmax != 0)
		nparams++;
	if (toep->tc_idx != -1) {
		MPASS(toep->tc_idx >= 0 &&
		    toep->tc_idx < sc->chip_params->nsched_cls);
		nparams++;
	}

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

#define FLOWC_PARAM(__m, __v) \
	do { \
		flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \
		flowc->mnemval[paramidx].val = htobe32(__v); \
		paramidx++; \
	} while (0)

	paramidx = 0;

	FLOWC_PARAM(PFNVFN, pfvf);
	FLOWC_PARAM(CH, pi->tx_chan);
	FLOWC_PARAM(PORT, pi->tx_chan);
	FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id);
	if (ftxp) {
		uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);

		FLOWC_PARAM(SNDNXT, ftxp->snd_nxt);
		FLOWC_PARAM(RCVNXT, ftxp->rcv_nxt);
		FLOWC_PARAM(SNDBUF, sndbuf);
		FLOWC_PARAM(MSS, ftxp->mss);

		CTR6(KTR_CXGBE,
		    "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x",
		    __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt,
		    ftxp->rcv_nxt);
	} else {
		FLOWC_PARAM(SNDBUF, 512);
		FLOWC_PARAM(MSS, 512);

		CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
	}
	if (toep->ulp_mode == ULP_MODE_TLS)
		FLOWC_PARAM(ULP_MODE, toep->ulp_mode);
	if (toep->tls.fcplenmax != 0)
		FLOWC_PARAM(TXDATAPLEN_MAX, toep->tls.fcplenmax);
	if (toep->tc_idx != -1)
		FLOWC_PARAM(SCHEDCLASS, toep->tc_idx);
#undef FLOWC_PARAM

	KASSERT(paramidx == nparams, ("nparams mismatch"));
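
	/*
	 * WRs consume tx credits (one per 16 bytes) and a software tx
	 * descriptor; both are returned by do_fw4_ack when the firmware
	 * acknowledges the WR.
	 */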
	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	toep->flags |= TPF_FLOWC_WR_SENT;
	t4_wrq_tx(sc, wr);
}

/*
 * Input is Bytes/second (so_max_pacing_rate), chip counts in Kilobits/second.
 */
static int
update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps)
{
	int tc_idx, rc;
	const u_int kbps = (u_int) (uint64_t)Bps * 8ULL / 1000;
	const int port_id = toep->vi->pi->port_id;

	CTR3(KTR_CXGBE, "%s: tid %u, rate %uKbps", __func__, toep->tid, kbps);

	if (kbps == 0) {
		/* unbind */
		tc_idx = -1;
	} else {
		rc = t4_reserve_cl_rl_kbps(sc, port_id, kbps, &tc_idx);
		if (rc != 0)
			return (rc);
		MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls);
	}
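
	/*
	 * Binding to a different scheduling class requires a flowc WR; if the
	 * connection is already bound to the right class, only the
	 * reserve/release accounting below is needed.
	 */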
	if (toep->tc_idx != tc_idx) {
		struct wrqe *wr;
		struct fw_flowc_wr *flowc;
		int nparams = 1, flowclen, flowclen16;
		struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

		flowclen = sizeof(*flowc) + nparams * sizeof(struct
		    fw_flowc_mnemval);
		flowclen16 = howmany(flowclen, 16);
		if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0 ||
		    (wr = alloc_wrqe(roundup2(flowclen, 16),
		    toep->ofld_txq)) == NULL) {
			if (tc_idx >= 0)
				t4_release_cl_rl(sc, port_id, tc_idx);
			return (ENOMEM);
		}

		flowc = wrtod(wr);
		memset(flowc, 0, wr->wr_len);

		flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
		    V_FW_FLOWC_WR_NPARAMS(nparams));
		flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) |
		    V_FW_WR_FLOWID(toep->tid));

		flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		if (tc_idx == -1)
			flowc->mnemval[0].val = htobe32(0xff);
		else
			flowc->mnemval[0].val = htobe32(tc_idx);

		txsd->tx_credits = flowclen16;
		txsd->plen = 0;
		toep->tx_credits -= txsd->tx_credits;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
			toep->txsd_pidx = 0;
		toep->txsd_avail--;
		t4_wrq_tx(sc, wr);
	}

	if (toep->tc_idx >= 0)
		t4_release_cl_rl(sc, port_id, toep->tc_idx);
	toep->tc_idx = tc_idx;

	return (0);
}
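
/*
 * Send an ABORT_REQ to the hardware, which will send a RST to the peer and
 * reply with a CPL_ABORT_RPL_RSS (handled by do_abort_rpl).
 */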
static void
send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
{
	struct wrqe *wr;
	struct cpl_abort_req *req;
	int tid = toep->tid;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);	/* don't use if INP_DROPPED */

	INP_WLOCK_ASSERT(inp);

	CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
	    __func__, toep->tid,
	    inp->inp_flags & INP_DROPPED ? "inp dropped" :
	    tcpstates[tp->t_state],
	    toep->flags, inp->inp_flags,
	    toep->flags & TPF_ABORT_SHUTDOWN ?
	    " (abort already in progress)" : "");

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;			/* abort already in progress */

	toep->flags |= TPF_ABORT_SHUTDOWN;

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %d.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
	if (inp->inp_flags & INP_DROPPED)
		req->rsvd0 = htobe32(snd_nxt);
	else
		req->rsvd0 = htobe32(tp->snd_nxt);
	req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	/*
	 * XXX: What's the correct way to tell that the inp hasn't been detached
	 * from its socket?  Should I even be flushing the snd buffer here?
	 */
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)	/* because I'm not sure.  See comment above */
			sbflush(&so->so_snd);
	}

	t4_l2t_send(sc, wr, toep->l2te);
}

/*
 * Called when a connection is established to translate the TCP options
 * reported by HW to FreeBSD's native format.
 */
static void
assign_rxopt(struct tcpcb *tp, uint16_t opt)
{
	struct toepcb *toep = tp->t_toe;
	struct inpcb *inp = tp->t_inpcb;
	struct adapter *sc = td_adapter(toep->td);

	INP_LOCK_ASSERT(inp);

	toep->mtu_idx = G_TCPOPT_MSS(opt);
	tp->t_maxseg = sc->params.mtus[toep->mtu_idx];
	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		tp->t_maxseg -= sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		tp->t_maxseg -= sizeof(struct ip) + sizeof(struct tcphdr);

	toep->emss = tp->t_maxseg;
	if (G_TCPOPT_TSTAMP(opt)) {
		tp->t_flags |= TF_RCVD_TSTMP;	/* timestamps ok */
		tp->ts_recent = 0;		/* hmmm */
		tp->ts_recent_age = tcp_ts_getticks();
		toep->emss -= TCPOLEN_TSTAMP_APPA;
	}

	CTR6(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u), t_maxseg %u, emss %u",
	    __func__, toep->tid, toep->mtu_idx,
	    sc->params.mtus[G_TCPOPT_MSS(opt)], tp->t_maxseg, toep->emss);

	if (G_TCPOPT_SACK(opt))
		tp->t_flags |= TF_SACK_PERMIT;	/* should already be set */
	else
		tp->t_flags &= ~TF_SACK_PERMIT;	/* sack disallowed by peer */

	if (G_TCPOPT_WSCALE_OK(opt))
		tp->t_flags |= TF_RCVD_SCALE;

	/* Doing window scaling? */
	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
		tp->rcv_scale = tp->request_r_scale;
		tp->snd_scale = G_TCPOPT_SND_WSCALE(opt);
	}
}

/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCPS_ESTABLISHED.
 *
 * The ISNs are from the exchange of SYNs.
 */
void
make_established(struct toepcb *toep, uint32_t iss, uint32_t irs, uint16_t opt)
{
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct tcpcb *tp = intotcpcb(inp);
	long bufsize;
	uint16_t tcpopt = be16toh(opt);
	struct flowc_tx_params ftxp;

	INP_WLOCK_ASSERT(inp);
	KASSERT(tp->t_state == TCPS_SYN_SENT ||
	    tp->t_state == TCPS_SYN_RECEIVED,
	    ("%s: TCP state %s", __func__, tcpstates[tp->t_state]));

	CTR6(KTR_CXGBE, "%s: tid %d, so %p, inp %p, tp %p, toep %p",
	    __func__, toep->tid, so, inp, tp, toep);

	tcp_state_change(tp, TCPS_ESTABLISHED);
	tp->t_starttime = ticks;
	TCPSTAT_INC(tcps_connects);

	tp->irs = irs;
	tcp_rcvseqinit(tp);
	tp->rcv_wnd = (u_int)toep->opt0_rcv_bufsize << 10;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	tp->iss = iss;
	tcp_sendseqinit(tp);
	tp->snd_una = iss + 1;
	tp->snd_nxt = iss + 1;
	tp->snd_max = iss + 1;

	assign_rxopt(tp, tcpopt);

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
		bufsize = V_tcp_autosndbuf_max;
	else
		bufsize = sbspace(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);

	ftxp.snd_nxt = tp->snd_nxt;
	ftxp.rcv_nxt = tp->rcv_nxt;
	ftxp.snd_space = bufsize;
	ftxp.mss = toep->emss;
	send_flowc_wr(toep, &ftxp);

	soisconnected(so);
}
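
/*
 * Return receive window credits to the hardware with a CPL_RX_DATA_ACK so
 * that it can reopen the receive window.  Returns the number of credits
 * actually handed back.
 */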
int
send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits)
{
	struct wrqe *wr;
	struct cpl_rx_data_ack *req;
	uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	KASSERT(credits >= 0, ("%s: %d credits", __func__, credits));

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL)
		return (0);
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
	req->credit_dack = htobe32(dack | V_RX_CREDITS(credits));

	t4_wrq_tx(sc, wr);
	return (credits);
}

void
send_rx_modulate(struct adapter *sc, struct toepcb *toep)
{
	struct wrqe *wr;
	struct cpl_rx_data_ack *req;

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL)
		return;
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
	req->credit_dack = htobe32(F_RX_MODULATE_RX);

	t4_wrq_tx(sc, wr);
}
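
/*
 * Called when the application has read from the socket; works out how much
 * window can be returned to the hardware and whether it is worth a
 * CPL_RX_DATA_ACK right now.
 */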
void
t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct toepcb *toep = tp->t_toe;
	int rx_credits;

	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK_ASSERT(sb);

	rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
	if (toep->ulp_mode == ULP_MODE_TLS) {
		if (toep->tls.rcv_over >= rx_credits) {
			toep->tls.rcv_over -= rx_credits;
			rx_credits = 0;
		} else {
			rx_credits -= toep->tls.rcv_over;
			toep->tls.rcv_over = 0;
		}
	}

	if (rx_credits > 0 &&
	    (tp->rcv_wnd <= 32 * 1024 || rx_credits >= 64 * 1024 ||
	    (rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) ||
	    sbused(sb) + tp->rcv_wnd < sb->sb_lowat)) {
		rx_credits = send_rx_credits(sc, toep, rx_credits);
		tp->rcv_wnd += rx_credits;
		tp->rcv_adv += rx_credits;
	} else if (toep->flags & TPF_FORCE_CREDITS)
		send_rx_modulate(sc, toep);
}

void
t4_rcvd(struct toedev *tod, struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;

	SOCKBUF_LOCK(sb);
	t4_rcvd_locked(tod, tp);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Close a connection by sending a CPL_CLOSE_CON_REQ message.
 */
static int
t4_close_conn(struct adapter *sc, struct toepcb *toep)
{
	struct wrqe *wr;
	struct cpl_close_con_req *req;
	unsigned int tid = toep->tid;

	CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
	    toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");

	if (toep->flags & TPF_FIN_SENT)
		return (0);

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
	    V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
	req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
	    V_FW_WR_FLOWID(tid));
	req->wr.wr_lo = cpu_to_be64(0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	toep->flags |= TPF_FIN_SENT;
	toep->flags &= ~TPF_SEND_FIN;
	t4_l2t_send(sc, wr, toep->l2te);

	return (0);
}

#define MAX_OFLD_TX_CREDITS	(SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TX_CREDITS	(howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16))
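
/*
 * Offload tx work request credits are in units of 16 bytes: a WR of length
 * len costs howmany(len, 16) credits.
 */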

/* Maximum amount of immediate data we could stuff in a WR */
static int
max_imm_payload(int tx_credits)
{
	const int n = 2;	/* Use only up to 2 desc for imm. data WR */

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	if (tx_credits >= (n * EQ_ESIZE) / 16)
		return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr));
	else
		return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr));
}

/* Maximum number of SGL entries we could stuff in a WR */
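/*
 * The leading ulptx_sgl (16 bytes) holds one segment; each additional
 * ulptx_sge_pair (24 bytes) holds two more.  A 16-byte remainder still fits
 * one more address/length pair, hence the final adjustment.
 */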
static int
max_dsgl_nsegs(int tx_credits)
{
	int nseg = 1;	/* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */
	int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS;

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	nseg += 2 * (sge_pair_credits * 16 / 24);
	if ((sge_pair_credits * 16) % 24 == 16)
		nseg++;

	return (nseg);
}

static void
write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
    unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign)
{
	struct fw_ofld_tx_data_wr *txwr = dst;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) |
	    V_FW_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
	    V_FW_WR_LEN16(credits));
	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) |
	    V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove));
	txwr->plen = htobe32(plen);

	if (txalign > 0) {
		struct tcpcb *tp = intotcpcb(toep->inp);

		if (plen < 2 * toep->emss)
			txwr->lsodisable_to_flags |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE);
		else
			txwr->lsodisable_to_flags |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD |
				(tp->t_flags & TF_NODELAY ? 0 :
				F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE));
	}
}

/*
 * Generate a DSGL from a starting mbuf.  The total number of segments and the
 * maximum segments in any one mbuf are provided.
 */
static void
write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
{
	struct mbuf *m;
	struct ulptx_sgl *usgl = dst;
	int i, j, rc;
	struct sglist sg;
	struct sglist_seg segs[n];

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	sglist_init(&sg, n, segs);
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	i = -1;
	for (m = start; m != stop; m = m->m_next) {
		if (IS_AIOTX_MBUF(m))
			rc = sglist_append_vmpages(&sg, aiotx_mbuf_pages(m),
			    aiotx_mbuf_pgoff(m), m->m_len);
		else
			rc = sglist_append(&sg, mtod(m, void *), m->m_len);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
			nsegs--;
		}
		sglist_reset(&sg);
	}
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p",
	    __func__, nsegs, start, stop));
}

/*
 * Max number of SGL entries an offload tx work request can have.  This is 41
 * (1 + 40) for a full 512B work request.
 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40)
 */
#define OFLD_SGL_LEN (41)

/*
 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
 * was transmitted.
 *
 * drop indicates the number of bytes that should be dropped from the head of
 * the send buffer.  It is an optimization that lets do_fw4_ack avoid creating
 * contention on the send buffer lock (before this change it used to do
 * sowwakeup and then t4_push_frames right after that when recovering from tx
 * stalls).  When drop is set this function MUST drop the bytes and wake up
 * any writers sleeping on the send buffer, as appropriate.
 */
void
t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m, *sb_sndptr;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tx_credits, shove, compl, sowwakeup;
	struct ofld_tx_sdesc *txsd;
	bool aiotx_mbuf_seen;

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
	    toep->ulp_mode == ULP_MODE_TCPDDP ||
	    toep->ulp_mode == ULP_MODE_TLS ||
	    toep->ulp_mode == ULP_MODE_RDMA,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
	    __func__, toep->tid, toep->flags, tp->t_flags, drop);
#endif
	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

#ifdef RATELIMIT
	if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
	    (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
		inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
	}
#endif

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	txsd = &toep->txsd[toep->txsd_pidx];
	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}
		sb_sndptr = sb->sb_sndptr;
		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		aiotx_mbuf_seen = false;
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n;

			if (IS_AIOTX_MBUF(m))
				n = sglist_count_vmpages(aiotx_mbuf_pages(m),
				    aiotx_mbuf_pgoff(m), m->m_len);
			else
				n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, back out */
			if (plen > max_imm && nsegs > max_nsegs) {
				nsegs -= n;
				plen -= m->m_len;
				if (plen == 0) {
					/* Too few credits */
					toep->flags |= TPF_TX_SUSPENDED;
					if (sowwakeup) {
						if (!TAILQ_EMPTY(
						    &toep->aiotx_jobq))
							t4_aiotx_queue_toep(so,
							    toep);
						sowwakeup_locked(so);
					} else
						SOCKBUF_UNLOCK(sb);
					SOCKBUF_UNLOCK_ASSERT(sb);
					return;
				}
				break;
			}

			if (IS_AIOTX_MBUF(m))
				aiotx_mbuf_seen = true;
			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
			sb_sndptr = m;	/* new sb->sb_sndptr if all goes well */

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				m = m->m_next;
				break;
			}
		}

		if (sbused(sb) > sb->sb_hiwat * 5 / 8 &&
		    toep->plen_nocompl + plen >= sb->sb_hiwat / 4)
			compl = 1;
		else
			compl = 0;

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup) {
			if (!TAILQ_EMPTY(&toep->aiotx_jobq))
				t4_aiotx_queue_toep(so, toep);
			sowwakeup_locked(so);
		} else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
		if (plen <= max_imm && !aiotx_mbuf_seen) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
					toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, plen, credits, shove, 0,
			    sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */

			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, plen, credits, shove, 0,
			    sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4)
			compl = 1;

		if (compl || toep->ulp_mode == ULP_MODE_RDMA) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += plen;
		tp->snd_max += plen;

		SOCKBUF_LOCK(sb);
		KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__));
		sb->sb_sndptr = sb_sndptr;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		t4_close_conn(sc, toep);
}
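
/*
 * Drop PDUs from the head of the queue; the credits being returned always
 * cover whole PDUs, as the MPASSes below insist.
 */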
static void
rqdrop_locked(struct mbufq *q, int plen)
{
	struct mbuf *m;

	while (plen > 0) {
		m = mbufq_dequeue(q);

		/* Too many credits. */
		MPASS(m != NULL);
		M_ASSERTPKTHDR(m);

		/* Partial credits. */
		MPASS(plen >= m->m_pkthdr.len);

		plen -= m->m_pkthdr.len;
		m_freem(m);
	}
}

static void
t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	u_int adjusted_plen, ulp_submode;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	int tx_credits, shove;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
	struct mbufq *pduq = &toep->ulp_pduq;
	static const u_int ulp_extra_len[] = {0, 4, 4, 8};

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
	KASSERT(toep->ulp_mode == ULP_MODE_ISCSI,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	if (drop)
		rqdrop_locked(&toep->ulp_pdu_reclaimq, drop);

	while ((sndptr = mbufq_first(pduq)) != NULL) {
		M_ASSERTPKTHDR(sndptr);

		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/*
			 * This mbuf would send us _over_ the nsegs limit.
			 * Suspend tx because the PDU can't be sent out.
			 */
			if (plen > max_imm && nsegs > max_nsegs) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		/*
		 * We have a PDU to send.  All of it goes out in one WR so 'm'
		 * is NULL.  A PDU's length is always a multiple of 4.
		 */
		MPASS(m == NULL);
		MPASS((plen & 3) == 0);
		MPASS(sndptr->m_pkthdr.len == plen);

		shove = !(tp->t_flags & TF_MORETOCOME);
		ulp_submode = mbuf_ulp_submode(sndptr);
		MPASS(ulp_submode < nitems(ulp_extra_len));

		/*
		 * plen doesn't include header and data digests, which are
		 * generated and inserted in the right places by the TOE, but
		 * they do occupy TCP sequence space and need to be accounted
		 * for.
		 */
		adjusted_plen = plen + ulp_extra_len[ulp_submode];
		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
					toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */

			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		m = mbufq_dequeue(pduq);
		MPASS(m == sndptr);
		mbufq_enqueue(&toep->ulp_pdu_reclaimq, m);

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += adjusted_plen;
		tp->snd_max += adjusted_plen;

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	}

	/* Send a FIN if requested, but only if there are no more PDUs to send */
	if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN)
		t4_close_conn(sc, toep);
}
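
/*
 * tod_output hook: push out whatever the socket has queued, using the push
 * routine that matches the connection's ULP mode.
 */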
int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	if (toep->ulp_mode == ULP_MODE_ISCSI)
		t4_push_pdus(sc, toep, 0);
	else if (tls_tx_key(toep))
		t4_push_tls_records(sc, toep, 0);
	else
		t4_push_frames(sc, toep, 0);

	return (0);
}

int
t4_send_fin(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	toep->flags |= TPF_SEND_FIN;
	if (tp->t_state >= TCPS_ESTABLISHED) {
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_push_pdus(sc, toep, 0);
		else if (tls_tx_key(toep))
			t4_push_tls_records(sc, toep, 0);
		else
			t4_push_frames(sc, toep, 0);
	}

	return (0);
}

int
t4_send_rst(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#if defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc for tid %u [%s] not sent already",
	    __func__, toep->tid, tcpstates[tp->t_state]));

	send_reset(sc, toep, 0);
	return (0);
}

/*
 * Peer has sent us a FIN.
 */
static int
do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_peer_close *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so;
	struct epoch_tracker et;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_PEER_CLOSE,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (__predict_false(toep->flags & TPF_SYNQE)) {
		/*
		 * do_pass_establish must have run before do_peer_close and if
		 * this is still a synqe instead of a toepcb then the connection
		 * must be getting aborted.
		 */
		MPASS(toep->flags & TPF_ABORT_SHUTDOWN);
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CURVNET_SET(toep->vnet);
	INP_INFO_RLOCK_ET(&V_tcbinfo, et);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__,
	    tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	tp->rcv_nxt++;	/* FIN */

	so = inp->inp_socket;
	socantrcvmore(so);
	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		DDP_LOCK(toep);
		if (__predict_false(toep->ddp.flags &
		    (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)))
			handle_ddp_close(toep, tp, cpl->rcv_nxt);
		DDP_UNLOCK(toep);
	}

	if (toep->ulp_mode != ULP_MODE_RDMA) {
		KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
		    ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
		    be32toh(cpl->rcv_nxt)));
	}

	switch (tp->t_state) {
	case TCPS_SYN_RECEIVED:
		tp->t_starttime = ticks;
		/* FALLTHROUGH */

	case TCPS_ESTABLISHED:
		tcp_state_change(tp, TCPS_CLOSE_WAIT);
		break;

	case TCPS_FIN_WAIT_1:
		tcp_state_change(tp, TCPS_CLOSING);
		break;

	case TCPS_FIN_WAIT_2:
		tcp_twstart(tp);
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		CURVNET_RESTORE();

		INP_WLOCK(inp);
		final_cpl_received(toep);
		return (0);

	default:
		log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n",
		    __func__, tid, tp->t_state);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
	CURVNET_RESTORE();
	return (0);
}

/*
 * Peer has ACK'd our FIN.
 */
static int
do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	struct epoch_tracker et;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_CLOSE_CON_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CURVNET_SET(toep->vnet);
	INP_INFO_RLOCK_ET(&V_tcbinfo, et);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	so = inp->inp_socket;
	tp->snd_una = be32toh(cpl->snd_nxt) - 1;	/* exclude FIN */

	switch (tp->t_state) {
	case TCPS_CLOSING:	/* see TCPS_FIN_WAIT_2 in do_peer_close too */
		tcp_twstart(tp);
release:
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		CURVNET_RESTORE();

		INP_WLOCK(inp);
		final_cpl_received(toep);	/* no more CPLs expected */
		return (0);

	case TCPS_LAST_ACK:
		if (tcp_close(tp))
			INP_WUNLOCK(inp);
		goto release;

	case TCPS_FIN_WAIT_1:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			soisdisconnected(so);
		tcp_state_change(tp, TCPS_FIN_WAIT_2);
		break;

	default:
		log(LOG_ERR,
		    "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n",
		    __func__, tid, tcpstates[tp->t_state]);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
	CURVNET_RESTORE();
	return (0);
}
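
/*
 * Send a CPL_ABORT_RPL carrying the requested RST disposition back to the
 * hardware; we owe it one for every CPL_ABORT_REQ_RSS received.
 */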
static void
send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
    int rst_status)
{
	struct wrqe *wr;
	struct cpl_abort_rpl *cpl;

	wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	cpl = wrtod(wr);

	INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid);
	cpl->cmd = rst_status;

	t4_wrq_tx(sc, wr);
}
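
/*
 * Translate the hardware's abort reason into the errno reported to the
 * application.
 */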
static int
abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET);
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return (ETIMEDOUT);
	default:
		return (EIO);
	}
}

/*
 * TCP RST from the peer, timeout, or some other such critical error.
 */
static int
do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct sge_wrq *ofld_txq = toep->ofld_txq;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct epoch_tracker et;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_REQ_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_req_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	if (negative_advice(cpl->status)) {
		CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)",
		    __func__, cpl->status, tid, toep->flags);
		return (0);	/* Ignore negative advice */
	}

	inp = toep->inp;
	CURVNET_SET(toep->vnet);
	INP_INFO_RLOCK_ET(&V_tcbinfo, et);	/* for tcp_close */
	INP_WLOCK(inp);

	tp = intotcpcb(inp);

	CTR6(KTR_CXGBE,
	    "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags,
	    inp->inp_flags, cpl->status);

	/*
	 * If we'd initiated an abort earlier the reply to it is responsible for
	 * cleaning up resources.  Otherwise we tear everything down right here
	 * right now.  We owe the T4 a CPL_ABORT_RPL no matter what.
	 */
	if (toep->flags & TPF_ABORT_SHUTDOWN) {
		INP_WUNLOCK(inp);
		goto done;
	}
	toep->flags |= TPF_ABORT_SHUTDOWN;

	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)
			so_error_set(so, abort_status_to_errno(tp,
			    cpl->status));
		tp = tcp_close(tp);
		if (tp == NULL)
			INP_WLOCK(inp);	/* re-acquire */
	}

	final_cpl_received(toep);
done:
	INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
	CURVNET_RESTORE();
	send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
	return (0);
}

/*
 * Reply to the CPL_ABORT_REQ (send_reset)
 */
static int
do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_RPL_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_rpl_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d",
	    __func__, tid, toep, inp, cpl->status);

	KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
	    ("%s: wasn't expecting abort reply", __func__));

	INP_WLOCK(inp);
	final_cpl_received(toep);

	return (0);
}

static int
do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct epoch_tracker et;
	int len, rx_credits;
	uint32_t ddp_placed = 0;

	if (__predict_false(toep->flags & TPF_SYNQE)) {
		/*
		 * do_pass_establish must have run before do_rx_data and if this
		 * is still a synqe instead of a toepcb then the connection must
		 * be getting aborted.
		 */
		MPASS(toep->flags & TPF_ABORT_SHUTDOWN);
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		m_freem(m);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	tp = intotcpcb(inp);

	if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq)))
		ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt;

	tp->rcv_nxt += len;
	if (tp->rcv_wnd < len) {
		KASSERT(toep->ulp_mode == ULP_MODE_RDMA,
		    ("%s: negative window size", __func__));
	}

	tp->rcv_wnd -= len;
	tp->t_rcvtime = ticks;

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		DDP_LOCK(toep);
	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, len);
		m_freem(m);
		SOCKBUF_UNLOCK(sb);
		if (toep->ulp_mode == ULP_MODE_TCPDDP)
			DDP_UNLOCK(toep);
		INP_WUNLOCK(inp);

		CURVNET_SET(toep->vnet);
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		CURVNET_RESTORE();

		return (0);
	}

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
	}

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		int changed = !(toep->ddp.flags & DDP_ON) ^ cpl->ddp_off;

		if (toep->ddp.waiting_count != 0 || toep->ddp.active_count != 0)
			CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)",
			    __func__, tid, len);

		if (changed) {
			if (toep->ddp.flags & DDP_SC_REQ)
				toep->ddp.flags ^= DDP_ON | DDP_SC_REQ;
			else {
				KASSERT(cpl->ddp_off == 1,
				    ("%s: DDP switched on by itself.",
				    __func__));

				/* Fell out of DDP mode */
				toep->ddp.flags &= ~DDP_ON;
				CTR1(KTR_CXGBE, "%s: fell out of DDP mode",
				    __func__);

				insert_ddp_data(toep, ddp_placed);
			}
		}

		if (toep->ddp.flags & DDP_ON) {
			/*
			 * CPL_RX_DATA with DDP on can only be an indicate.
			 * Start posting queued AIO requests via DDP.  The
			 * payload that arrived in this indicate is appended
			 * to the socket buffer as usual.
			 */
			handle_ddp_indicate(toep);
		}
	}

	sbappendstream_locked(sb, m, 0);
	rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
	if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) {
		rx_credits = send_rx_credits(sc, toep, rx_credits);
		tp->rcv_wnd += rx_credits;
		tp->rcv_adv += rx_credits;
	}

	if (toep->ulp_mode == ULP_MODE_TCPDDP && toep->ddp.waiting_count > 0 &&
	    sbavail(sb) != 0) {
		CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__,
		    tid);
		ddp_queue_toep(toep);
	}
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		DDP_UNLOCK(toep);

	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}

static int
do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp;
	struct tcpcb *tp = NULL;
	struct socket *so;
	uint8_t credits = cpl->credits;
	struct ofld_tx_sdesc *txsd;
	int plen;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	/*
	 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and
	 * now this comes back carrying the credits for the flowc.
	 */
	if (__predict_false(toep->flags & TPF_SYNQE)) {
		KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
		    ("%s: credits for a synq entry %p", __func__, toep));
		return (0);
	}

	inp = toep->inp;

	KASSERT(opcode == CPL_FW4_ACK,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_WLOCK(inp);

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) {
		INP_WUNLOCK(inp);
		return (0);
	}

	KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0,
	    ("%s: inp_flags 0x%x", __func__, inp->inp_flags));

	tp = intotcpcb(inp);

	if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
		tcp_seq snd_una = be32toh(cpl->snd_una);

#ifdef INVARIANTS
		if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) {
			log(LOG_ERR,
			    "%s: unexpected seq# %x for TID %u, snd_una %x\n",
			    __func__, snd_una, toep->tid, tp->snd_una);
		}
#endif

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->ts_recent_age = tcp_ts_getticks();
		}
	}

#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: tid %d credits %u", __func__, tid, credits);
#endif
	so = inp->inp_socket;
	txsd = &toep->txsd[toep->txsd_cidx];
	plen = 0;
	while (credits) {
		KASSERT(credits >= txsd->tx_credits,
		    ("%s: too many (or partial) credits", __func__));
		credits -= txsd->tx_credits;
		toep->tx_credits += txsd->tx_credits;
		plen += txsd->plen;
		if (txsd->iv_buffer) {
			free(txsd->iv_buffer, M_CXGBE);
			txsd->iv_buffer = NULL;
		}
		txsd++;
		toep->txsd_avail++;
		KASSERT(toep->txsd_avail <= toep->txsd_total,
		    ("%s: txsd avail > total", __func__));
		if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
			txsd = &toep->txsd[0];
			toep->txsd_cidx = 0;
		}
	}

	if (toep->tx_credits == toep->tx_total) {
		toep->tx_nocompl = 0;
		toep->plen_nocompl = 0;
	}

	if (toep->flags & TPF_TX_SUSPENDED &&
	    toep->tx_credits >= toep->tx_total / 4) {
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: tid %d calling t4_push_frames", __func__,
		    tid);
#endif
		toep->flags &= ~TPF_TX_SUSPENDED;
		CURVNET_SET(toep->vnet);
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_push_pdus(sc, toep, plen);
		else if (tls_tx_key(toep))
			t4_push_tls_records(sc, toep, plen);
		else
			t4_push_frames(sc, toep, plen);
		CURVNET_RESTORE();
	} else if (plen > 0) {
		struct sockbuf *sb = &so->so_snd;
		int sbu;

		SOCKBUF_LOCK(sb);
		sbu = sbused(sb);
		if (toep->ulp_mode == ULP_MODE_ISCSI) {

			if (__predict_false(sbu > 0)) {
				/*
				 * The data transmitted before the tid's ULP
				 * mode changed to ISCSI is still in so_snd.
				 * Incoming credits should account for so_snd
				 * first.
				 */
				sbdrop_locked(sb, min(sbu, plen));
				plen -= min(sbu, plen);
			}
			sowwakeup_locked(so);	/* unlocks so_snd */
			rqdrop_locked(&toep->ulp_pdu_reclaimq, plen);
		} else {
#ifdef VERBOSE_TRACES
			CTR3(KTR_CXGBE, "%s: tid %d dropped %d bytes", __func__,
			    tid, plen);
#endif
			sbdrop_locked(sb, plen);
			if (tls_tx_key(toep)) {
				struct tls_ofld_info *tls_ofld = &toep->tls;

				MPASS(tls_ofld->sb_off >= plen);
				tls_ofld->sb_off -= plen;
			}
			if (!TAILQ_EMPTY(&toep->aiotx_jobq))
				t4_aiotx_queue_toep(so, toep);
			sowwakeup_locked(so);	/* unlocks so_snd */
		}
		SOCKBUF_UNLOCK_ASSERT(sb);
	}

	INP_WUNLOCK(inp);

	return (0);
}
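
/*
 * Update one field of a connection's TCB with CPL_SET_TCB_FIELD.  On an
 * offload queue the WR consumes tx credits and a tx descriptor, just like
 * data WRs.
 */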
void
t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep,
    uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;
	struct ofld_tx_sdesc *txsd;

	MPASS((cookie & ~M_COOKIE) == 0);
	if (reply) {
		MPASS(cookie != CPL_COOKIE_RESERVED);
	}

	wr = alloc_wrqe(sizeof(*req), wrq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
	req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	if (reply == 0)
		req->reply_ctrl |= htobe16(F_NO_REPLY);
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	if ((wrq->eq.flags & EQ_TYPEMASK) == EQ_OFLD) {
		txsd = &toep->txsd[toep->txsd_pidx];
		txsd->tx_credits = howmany(sizeof(*req), 16);
		txsd->plen = 0;
		KASSERT(toep->tx_credits >= txsd->tx_credits &&
		    toep->txsd_avail > 0,
		    ("%s: not enough credits (%d)", __func__,
		    toep->tx_credits));
		toep->tx_credits -= txsd->tx_credits;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
			toep->txsd_pidx = 0;
		toep->txsd_avail--;
	}

	t4_wrq_tx(sc, wr);
}

void
t4_init_cpl_io_handlers(void)
{

	t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
	t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
	t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
	t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl,
	    CPL_COOKIE_TOM);
	t4_register_cpl_handler(CPL_RX_DATA, do_rx_data);
	t4_register_shared_cpl_handler(CPL_FW4_ACK, do_fw4_ack, CPL_COOKIE_TOM);
}

void
t4_uninit_cpl_io_handlers(void)
{

	t4_register_cpl_handler(CPL_PEER_CLOSE, NULL);
	t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL);
	t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL);
	t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, NULL, CPL_COOKIE_TOM);
	t4_register_cpl_handler(CPL_RX_DATA, NULL);
	t4_register_shared_cpl_handler(CPL_FW4_ACK, NULL, CPL_COOKIE_TOM);
}

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * sent by the AIO job so far and the 'backend4' field to hold an
 * error that should be reported when the job is completed.
 */
#define aio_sent	backend3
#define aio_error	backend4

#define jobtotid(job)							\
	(((struct toepcb *)(so_sototcpcb((job)->fd_file->f_data)->t_toe))->tid)

static void
free_aiotx_buffer(struct aiotx_buffer *ab)
{
	struct kaiocb *job;
	long status;
	int error;

	if (refcount_release(&ab->refcount) == 0)
		return;

	job = ab->job;
	error = job->aio_error;
	status = job->aio_sent;
	vm_page_unhold_pages(ab->ps.pages, ab->ps.npages);
	free(ab, M_CXGBE);
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d completed %p len %ld, error %d", __func__,
	    jobtotid(job), job, status, error);
#endif
	if (error == ECANCELED && status != 0)
		error = 0;
	if (error == ECANCELED)
		aio_cancel(job);
	else if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, status, 0);
}

static void
t4_aiotx_mbuf_free(struct mbuf *m)
{
	struct aiotx_buffer *ab = m->m_ext.ext_arg1;

#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: completed %d bytes for tid %d", __func__,
	    m->m_len, jobtotid(ab->job));
#endif
	free_aiotx_buffer(ab);
}

/*
 * Hold the buffer backing an AIO request and return an AIO transmit
 * buffer.
 */
static int
hold_aio(struct kaiocb *job)
{
	struct aiotx_buffer *ab;
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t start, end, pgoff;
	int n;

	MPASS(job->backend1 == NULL);

	/*
	 * The AIO subsystem will cancel and drain all requests before
	 * permitting a process to exit or exec, so p_vmspace should
	 * be stable here.
	 */
	vm = job->userproc->p_vmspace;
	map = &vm->vm_map;
	start = (uintptr_t)job->uaiocb.aio_buf;
	pgoff = start & PAGE_MASK;
	end = round_page(start + job->uaiocb.aio_nbytes);
	start = trunc_page(start);
	n = atop(end - start);

	ab = malloc(sizeof(*ab) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
	    M_ZERO);
	refcount_init(&ab->refcount, 1);
	ab->ps.pages = (vm_page_t *)(ab + 1);
	ab->ps.npages = vm_fault_quick_hold_pages(map, start, end - start,
	    VM_PROT_WRITE, ab->ps.pages, n);
	if (ab->ps.npages < 0) {
		free(ab, M_CXGBE);
		return (EFAULT);
	}

	KASSERT(ab->ps.npages == n,
	    ("hold_aio: page count mismatch: %d vs %d", ab->ps.npages, n));

	ab->ps.offset = pgoff;
	ab->ps.len = job->uaiocb.aio_nbytes;
	ab->job = job;
	job->backend1 = ab;
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
	    __func__, jobtotid(job), &ab->ps, job, ab->ps.npages);
#endif
	return (0);
}

static void
t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
{
	struct adapter *sc;
	struct sockbuf *sb;
	struct file *fp;
	struct aiotx_buffer *ab;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct mbuf *m;
	int error;
	bool moretocome, sendmore;

	sc = td_adapter(toep->td);
	sb = &so->so_snd;
	SOCKBUF_UNLOCK(sb);
	fp = job->fd_file;
	ab = job->backend1;
	m = NULL;

#ifdef MAC
	error = mac_socket_check_send(fp->f_cred, so);
	if (error != 0)
		goto out;
#endif

	if (ab == NULL) {
		error = hold_aio(job);
		if (error != 0)
			goto out;
		ab = job->backend1;
	}

	/* Inline sosend_generic(). */

	job->msgsnd = 1;

	error = sblock(sb, SBL_WAIT);
	MPASS(error == 0);

sendanother:
	m = m_get(M_WAITOK, MT_DATA);

	SOCKBUF_LOCK(sb);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		if ((so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		error = ENOTCONN;
		goto out;
	}
	if (sbspace(sb) < sb->sb_lowat) {
		MPASS(job->aio_sent == 0 || !(so->so_state & SS_NBIO));

		/*
		 * Don't block if there is too little room in the socket
		 * buffer.  Instead, requeue the request.
		 */
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			sbunlock(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}

	/*
	 * Write as much data as the socket permits, but no more than a
	 * single sndbuf at a time.
	 */
	m->m_len = sbspace(sb);
	if (m->m_len > ab->ps.len - job->aio_sent) {
		m->m_len = ab->ps.len - job->aio_sent;
		moretocome = false;
	} else
		moretocome = true;
	if (m->m_len > sc->tt.sndbuf) {
		m->m_len = sc->tt.sndbuf;
		sendmore = true;
	} else
		sendmore = false;

	if (!TAILQ_EMPTY(&toep->aiotx_jobq))
		moretocome = true;
	SOCKBUF_UNLOCK(sb);
	MPASS(m->m_len != 0);

	/* Inlined tcp_usr_send(). */

	inp = toep->inp;
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		sbunlock(sb);
		error = ECONNRESET;
		goto out;
	}

	refcount_acquire(&ab->refcount);
	m_extadd(m, NULL, ab->ps.len, t4_aiotx_mbuf_free, ab,
	    (void *)(uintptr_t)job->aio_sent, 0, EXT_NET_DRV);
	m->m_ext.ext_flags |= EXT_FLAG_AIOTX;
	job->aio_sent += m->m_len;

	sbappendstream(sb, m, 0);
	m = NULL;

	if (!(inp->inp_flags & INP_DROPPED)) {
		tp = intotcpcb(inp);
		if (moretocome)
			tp->t_flags |= TF_MORETOCOME;
		error = tp->t_fb->tfb_tcp_output(tp);
		if (moretocome)
			tp->t_flags &= ~TF_MORETOCOME;
	}

	INP_WUNLOCK(inp);
	if (sendmore)
		goto sendanother;
	sbunlock(sb);

	if (error)
		goto out;

	/*
	 * If this is a non-blocking socket and the request has not
	 * been fully completed, requeue it until the socket is ready
	 * again.
	 */
	if (job->aio_sent < job->uaiocb.aio_nbytes &&
	    !(so->so_state & SS_NBIO)) {
		SOCKBUF_LOCK(sb);
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		SOCKBUF_UNLOCK(sb);
		goto out;
	}

	/*
	 * If the request will not be requeued, drop a reference on
	 * the aiotx buffer.  Any mbufs in flight should still
	 * contain a reference, but this drops the reference that the
	 * job owns while it is waiting to queue mbufs to the socket.
	 */
	free_aiotx_buffer(ab);

out:
	if (error) {
		if (ab != NULL) {
			job->aio_error = error;
			free_aiotx_buffer(ab);
		} else {
			MPASS(job->aio_sent == 0);
			aio_complete(job, -1, error);
		}
	}
	if (m != NULL)
		m_freem(m);
	SOCKBUF_LOCK(sb);
}
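
/*
 * Taskqueue handler that services a connection's queued AIO tx jobs while
 * the socket buffer remains writable.
 */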
static void
t4_aiotx_task(void *context, int pending)
{
	struct toepcb *toep = context;
	struct socket *so;
	struct kaiocb *job;

	so = toep->aiotx_so;
	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(&so->so_snd);
	while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) {
		job = TAILQ_FIRST(&toep->aiotx_jobq);
		TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
		if (!aio_clear_cancel_function(job))
			continue;
		t4_aiotx_process_job(toep, so, job);
	}
	toep->aiotx_so = NULL;
	SOCKBUF_UNLOCK(&so->so_snd);
	CURVNET_RESTORE();

	free_toepcb(toep);
	SOCK_LOCK(so);
	sorele(so);
}

static void
t4_aiotx_queue_toep(struct socket *so, struct toepcb *toep)
{

	SOCKBUF_LOCK_ASSERT(&toep->inp->inp_socket->so_snd);
#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: queueing aiotx task for tid %d, active = %s",
	    __func__, toep->tid, toep->aiotx_so != NULL ? "true" : "false");
#endif
	if (toep->aiotx_so != NULL)
		return;
	soref(so);
	toep->aiotx_so = so;
	hold_toepcb(toep);
	soaio_enqueue(&toep->aiotx_task);
}

static void
t4_aiotx_cancel(struct kaiocb *job)
{
	struct aiotx_buffer *ab;
	struct socket *so;
	struct tcpcb *tp;
	struct toepcb *toep;

	so = job->fd_file->f_data;
	tp = so_sototcpcb(so);
	toep = tp->t_toe;
	MPASS(job->uaiocb.aio_lio_opcode == LIO_WRITE);

	SOCKBUF_LOCK(&so->so_snd);
	if (!aio_cancel_cleared(job))
		TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
	SOCKBUF_UNLOCK(&so->so_snd);

	ab = job->backend1;
	if (ab != NULL)
		free_aiotx_buffer(ab);
	else
		aio_cancel(job);
}

int
t4_aio_queue_aiotx(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);

	/* This only handles writes. */
	if (job->uaiocb.aio_lio_opcode != LIO_WRITE)
		return (EOPNOTSUPP);

	if (!sc->tt.tx_zcopy)
		return (EOPNOTSUPP);

	if (tls_tx_key(toep))
		return (EOPNOTSUPP);

	SOCKBUF_LOCK(&so->so_snd);
#ifdef VERBOSE_TRACES
	CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job);
#endif
	if (!aio_set_cancel_function(job, t4_aiotx_cancel))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list);
	if (sowriteable(so))
		t4_aiotx_queue_toep(so, toep);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);
}

void
aiotx_init_toep(struct toepcb *toep)
{

	TAILQ_INIT(&toep->aiotx_jobq);
	TASK_INIT(&toep->aiotx_task, 0, t4_aiotx_task, toep);
}