/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
34 #include "opt_inet6.h"
35 #include "opt_ratelimit.h"
38 #include <sys/param.h>
41 #include <sys/kernel.h>
43 #include <sys/module.h>
45 #include <sys/protosw.h>
46 #include <sys/domain.h>
47 #include <sys/socket.h>
48 #include <sys/socketvar.h>
49 #include <sys/sglist.h>
50 #include <sys/taskqueue.h>
51 #include <netinet/in.h>
52 #include <netinet/in_pcb.h>
53 #include <netinet/ip.h>
54 #include <netinet/ip6.h>
56 #include <netinet/tcp_fsm.h>
57 #include <netinet/tcp_seq.h>
58 #include <netinet/tcp_var.h>
59 #include <netinet/toecore.h>
61 #include <security/mac/mac_framework.h>
64 #include <vm/vm_extern.h>
66 #include <vm/vm_map.h>
67 #include <vm/vm_page.h>
69 #include "common/common.h"
70 #include "common/t4_msg.h"
71 #include "common/t4_regs.h"
72 #include "common/t4_tcb.h"
73 #include "tom/t4_tom_l2t.h"
74 #include "tom/t4_tom.h"

#define IS_AIOTX_MBUF(m)						\
	((m)->m_flags & M_EXT && (m)->m_ext.ext_flags & EXT_FLAG_AIOTX)

static void t4_aiotx_cancel(struct kaiocb *job);
static void t4_aiotx_queue_toep(struct toepcb *toep);

static size_t
aiotx_mbuf_pgoff(struct mbuf *m)
{
	struct aiotx_buffer *ab;

	MPASS(IS_AIOTX_MBUF(m));
	ab = m->m_ext.ext_arg1;
	return ((ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) % PAGE_SIZE);
}

static vm_page_t *
aiotx_mbuf_pages(struct mbuf *m)
{
	struct aiotx_buffer *ab;
	int npages;

	MPASS(IS_AIOTX_MBUF(m));
	ab = m->m_ext.ext_arg1;
	npages = (ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) / PAGE_SIZE;
	return (ab->ps.pages + npages);
}
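
/*
 * Worked example of the two helpers above (illustrative, assuming 4KB
 * pages): if the held buffer starts at page offset ps.offset = 0xff0
 * and this mbuf's ext_arg2 records that 0x30 bytes were already
 * consumed, the mbuf's data begins 0x1020 bytes into the pinned
 * region, i.e. at byte 0x20 of the second held page.
 * aiotx_mbuf_pages() then returns ps.pages + 1 and aiotx_mbuf_pgoff()
 * returns 0x20.
 */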

void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	unsigned int nparams = ftxp ? 8 : 6, flowclen;
	struct vi_info *vi = toep->vi;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
	    ("%s: flowc for tid %u sent already", __func__, toep->tid));

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id);
	if (ftxp) {
		uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);

		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
		flowc->mnemval[4].val = htobe32(ftxp->snd_nxt);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
		flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt);
		flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[6].val = htobe32(sndbuf);
		flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[7].val = htobe32(ftxp->mss);

		CTR6(KTR_CXGBE,
		    "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x",
		    __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt,
		    ftxp->rcv_nxt);
	} else {
		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[4].val = htobe32(512);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[5].val = htobe32(512);

		CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
	}

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	toep->flags |= TPF_FLOWC_WR_SENT;
	t4_wrq_tx(sc, wr);
}
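
/*
 * Worked example of the sizing above (illustrative; assumes the usual
 * layout of the firmware headers, 8 bytes each for fw_flowc_wr and
 * fw_flowc_mnemval): with all 8 parameters, flowclen is
 * 8 + 8 * 8 = 72 bytes.  That is rounded up to an 80-byte work request
 * and costs howmany(72, 16) = 5 tx credits, which are charged against
 * the connection through txsd and toep->tx_credits and refunded later
 * by do_fw4_ack().
 */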

#ifdef RATELIMIT
/*
 * Input is Bytes/second (so_max_pacing_rate), chip counts in Kilobits/second.
 */
static int
update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps)
{
	int tc_idx, rc;
	const u_int kbps = (u_int) (((uint64_t)Bps * 8ULL) / 1000);
	const int port_id = toep->vi->pi->port_id;

	CTR3(KTR_CXGBE, "%s: tid %u, rate %uKbps", __func__, toep->tid, kbps);

	if (kbps == 0) {
		/* unbind */
		tc_idx = -1;
	} else {
		rc = t4_reserve_cl_rl_kbps(sc, port_id, kbps, &tc_idx);
		if (rc != 0)
			return (rc);
		MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls);
	}

	if (toep->tc_idx != tc_idx) {
		struct wrqe *wr;
		struct fw_flowc_wr *flowc;
		int nparams = 1, flowclen, flowclen16;
		struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

		flowclen = sizeof(*flowc) + nparams * sizeof(struct
		    fw_flowc_mnemval);
		flowclen16 = howmany(flowclen, 16);
		if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0 ||
		    (wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq)) == NULL) {
			if (tc_idx >= 0)
				t4_release_cl_rl_kbps(sc, port_id, tc_idx);
			return (ENOMEM);
		}

		flowc = wrtod(wr);
		memset(flowc, 0, wr->wr_len);

		flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
		    V_FW_FLOWC_WR_NPARAMS(nparams));
		flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) |
		    V_FW_WR_FLOWID(toep->tid));

		flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		if (tc_idx == -1)
			flowc->mnemval[0].val = htobe32(0xff);
		else
			flowc->mnemval[0].val = htobe32(tc_idx);

		txsd->tx_credits = flowclen16;
		txsd->plen = 0;
		toep->tx_credits -= txsd->tx_credits;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
			toep->txsd_pidx = 0;
		toep->txsd_avail--;
		t4_wrq_tx(sc, wr);
	}

	if (toep->tc_idx >= 0)
		t4_release_cl_rl_kbps(sc, port_id, toep->tc_idx);
	toep->tc_idx = tc_idx;

	return (0);
}
#endif
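
/*
 * Example of the conversion above: a so_max_pacing_rate of 125,000,000
 * bytes/sec (a 1 Gb/s pacing rate) becomes 125000000 * 8 / 1000 =
 * 1,000,000 kbps, which is what the scheduler class is reserved with.
 */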

/*
 * Send a RESET to the peer.
 */
void
send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
{
	struct wrqe *wr;
	struct cpl_abort_req *req;
	int tid = toep->tid;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);	/* don't use if INP_DROPPED */

	INP_WLOCK_ASSERT(inp);

	CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
	    __func__, toep->tid,
	    inp->inp_flags & INP_DROPPED ? "inp dropped" :
	    tcpstates[tp->t_state],
	    toep->flags, inp->inp_flags,
	    toep->flags & TPF_ABORT_SHUTDOWN ?
	    " (abort already in progress)" : "");

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;	/* abort already in progress */

	toep->flags |= TPF_ABORT_SHUTDOWN;

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %d.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
	if (inp->inp_flags & INP_DROPPED)
		req->rsvd0 = htobe32(snd_nxt);
	else
		req->rsvd0 = htobe32(tp->snd_nxt);
	req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	/*
	 * XXX: What's the correct way to tell that the inp hasn't been detached
	 * from its socket?  Should I even be flushing the snd buffer here?
	 */
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)	/* because I'm not sure.  See comment above */
			sbflush(&so->so_snd);
	}

	t4_l2t_send(sc, wr, toep->l2te);
}

/*
 * Called when a connection is established to translate the TCP options
 * reported by HW to FreeBSD's native format.
 */
static void
assign_rxopt(struct tcpcb *tp, unsigned int opt)
{
	struct toepcb *toep = tp->t_toe;
	struct inpcb *inp = tp->t_inpcb;
	struct adapter *sc = td_adapter(toep->td);
	int n;

	INP_LOCK_ASSERT(inp);

	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		n = sizeof(struct ip) + sizeof(struct tcphdr);
	if (V_tcp_do_rfc1323)
		n += TCPOLEN_TSTAMP_APPA;
	tp->t_maxseg = sc->params.mtus[G_TCPOPT_MSS(opt)] - n;

	CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid,
	    G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]);

	if (G_TCPOPT_TSTAMP(opt)) {
		tp->t_flags |= TF_RCVD_TSTMP;	/* timestamps ok */
		tp->ts_recent = 0;		/* hmmm */
		tp->ts_recent_age = tcp_ts_getticks();
	}

	if (G_TCPOPT_SACK(opt))
		tp->t_flags |= TF_SACK_PERMIT;	/* should already be set */
	else
		tp->t_flags &= ~TF_SACK_PERMIT;	/* sack disallowed by peer */

	if (G_TCPOPT_WSCALE_OK(opt))
		tp->t_flags |= TF_RCVD_SCALE;

	/* Doing window scaling? */
	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
		tp->rcv_scale = tp->request_r_scale;
		tp->snd_scale = G_TCPOPT_SND_WSCALE(opt);
	}
}
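
/*
 * Example of the t_maxseg computation above (illustrative): if the
 * hardware-reported MTU index selects a 1500-byte MTU on an IPv4
 * connection, n is 20 + 20 = 40, so t_maxseg becomes 1460; with RFC
 * 1323 timestamps enabled another TCPOLEN_TSTAMP_APPA (12) bytes are
 * reserved and t_maxseg becomes 1448.
 */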

/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCPS_ESTABLISHED.
 *
 * The ISNs are from after the exchange of SYNs, i.e., the true ISN + 1.
 */
void
make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
    uint16_t opt)
{
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct tcpcb *tp = intotcpcb(inp);
	long bufsize;
	uint32_t iss = be32toh(snd_isn) - 1;	/* true ISS */
	uint32_t irs = be32toh(rcv_isn) - 1;	/* true IRS */
	uint16_t tcpopt = be16toh(opt);
	struct flowc_tx_params ftxp;

	INP_WLOCK_ASSERT(inp);
	KASSERT(tp->t_state == TCPS_SYN_SENT ||
	    tp->t_state == TCPS_SYN_RECEIVED,
	    ("%s: TCP state %s", __func__, tcpstates[tp->t_state]));

	CTR6(KTR_CXGBE, "%s: tid %d, so %p, inp %p, tp %p, toep %p",
	    __func__, toep->tid, so, inp, tp, toep);

	tp->t_state = TCPS_ESTABLISHED;
	tp->t_starttime = ticks;
	TCPSTAT_INC(tcps_connects);

	tp->irs = irs;
	tcp_rcvseqinit(tp);
	tp->rcv_wnd = toep->rx_credits << 10;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	/*
	 * If we were unable to send all rx credits via opt0, save the remainder
	 * in rx_credits so that they can be handed over with the next credit
	 * update.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	bufsize = select_rcv_wnd(so);
	SOCKBUF_UNLOCK(&so->so_rcv);
	toep->rx_credits = bufsize - tp->rcv_wnd;

	tp->iss = iss;
	tcp_sendseqinit(tp);
	tp->snd_una = iss + 1;
	tp->snd_nxt = iss + 1;
	tp->snd_max = iss + 1;

	assign_rxopt(tp, tcpopt);

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
		bufsize = V_tcp_autosndbuf_max;
	else
		bufsize = sbspace(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);

	ftxp.snd_nxt = tp->snd_nxt;
	ftxp.rcv_nxt = tp->rcv_nxt;
	ftxp.snd_space = bufsize;
	ftxp.mss = tp->t_maxseg;
	send_flowc_wr(toep, &ftxp);

	soisconnected(so);
}

int
send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits)
{
	struct wrqe *wr;
	struct cpl_rx_data_ack *req;
	uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	KASSERT(credits >= 0, ("%s: %d credits", __func__, credits));

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL)
		return (0);
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
	req->credit_dack = htobe32(dack | V_RX_CREDITS(credits));

	t4_wrq_tx(sc, wr);
	return (credits);
}

void
t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct toepcb *toep = tp->t_toe;
	int credits;

	INP_WLOCK_ASSERT(inp);

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));

	toep->rx_credits += toep->sb_cc - sbused(sb);
	toep->sb_cc = sbused(sb);

	if (toep->rx_credits > 0 &&
	    (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 ||
	    (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) ||
	    toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) {

		credits = send_rx_credits(sc, toep, toep->rx_credits);
		toep->rx_credits -= credits;
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
}
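
/*
 * The condition above batches credit returns: credits go back to the
 * chip when the receive window has shrunk to 32KB or less, when 64KB
 * or more of credits have accumulated, when 16KB+ have accumulated
 * and the window is at most 128KB, or when returning them is needed
 * to give the peer a usable window of at least sb_lowat.  This trades
 * a little window slack for far fewer CPL_RX_DATA_ACK messages.
 */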

void
t4_rcvd(struct toedev *tod, struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;

	SOCKBUF_LOCK(sb);
	t4_rcvd_locked(tod, tp);
	SOCKBUF_UNLOCK(sb);
}

/*
 * Close a connection by sending a CPL_CLOSE_CON_REQ message.
 */
static int
close_conn(struct adapter *sc, struct toepcb *toep)
{
	struct wrqe *wr;
	struct cpl_close_con_req *req;
	unsigned int tid = toep->tid;

	CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
	    toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");

	if (toep->flags & TPF_FIN_SENT)
		return (0);

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
	    V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
	req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
	    V_FW_WR_FLOWID(tid));
	req->wr.wr_lo = cpu_to_be64(0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	toep->flags |= TPF_FIN_SENT;
	toep->flags &= ~TPF_SEND_FIN;
	t4_l2t_send(sc, wr, toep->l2te);

	return (0);
}

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16))

/* Maximum amount of immediate data we could stuff in a WR */
static inline int
max_imm_payload(int tx_credits)
{
	const int n = 2;	/* Use only up to 2 desc for imm. data WR */

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	if (tx_credits >= (n * EQ_ESIZE) / 16)
		return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr));
	else
		return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr));
}

/* Maximum number of SGL entries we could stuff in a WR */
static inline int
max_dsgl_nsegs(int tx_credits)
{
	int nseg = 1;	/* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */
	int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS;

	KASSERT(tx_credits >= 0 &&
	    tx_credits <= MAX_OFLD_TX_CREDITS,
	    ("%s: %d credits", __func__, tx_credits));

	if (tx_credits < MIN_OFLD_TX_CREDITS)
		return (0);

	nseg += 2 * (sge_pair_credits * 16 / 24);
	if ((sge_pair_credits * 16) % 24 == 16)
		nseg++;

	return (nseg);
}
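
/*
 * Worked example (illustrative; assumes the usual 64B tx descriptor,
 * i.e. EQ_ESIZE == 64, and a 16B fw_ofld_tx_data_wr, which makes
 * MIN_OFLD_TX_CREDITS = howmany(17, 16) = 2): a full 512B work request
 * carries 32 credits.  max_imm_payload() caps immediate data at two
 * descriptors, 2 * 64 - 16 = 112 bytes.  max_dsgl_nsegs() gets
 * sge_pair_credits = 32 - 2 = 30, i.e. 480 bytes of ulptx_sge_pair
 * space, which holds 480 / 24 = 20 pairs = 40 segments; plus the one
 * held by ulptx_sgl itself that is 41, matching OFLD_SGL_LEN below.
 */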

static void
write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
    unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign)
{
	struct fw_ofld_tx_data_wr *txwr = dst;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) |
	    V_FW_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
	    V_FW_WR_LEN16(credits));
	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) |
	    V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove));
	txwr->plen = htobe32(plen);

	if (txalign > 0) {
		struct tcpcb *tp = intotcpcb(toep->inp);

		if (plen < 2 * tp->t_maxseg || is_10G_port(toep->vi->pi))
			txwr->lsodisable_to_flags |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE);
		else
			txwr->lsodisable_to_flags |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD |
				(tp->t_flags & TF_NODELAY ? 0 :
				F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE));
	}
}

/*
 * Generate a DSGL from a starting mbuf.  The total number of segments and the
 * maximum segments in any one mbuf are provided.
 */
static void
write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
{
	struct mbuf *m;
	struct ulptx_sgl *usgl = dst;
	int i, j, rc;
	struct sglist sg;
	struct sglist_seg segs[n];

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	sglist_init(&sg, n, segs);
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	i = -1;
	for (m = start; m != stop; m = m->m_next) {
		if (IS_AIOTX_MBUF(m))
			rc = sglist_append_vmpages(&sg, aiotx_mbuf_pages(m),
			    aiotx_mbuf_pgoff(m), m->m_len);
		else
			rc = sglist_append(&sg, mtod(m, void *), m->m_len);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
		sglist_reset(&sg);
	}
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p",
	    __func__, nsegs, start, stop));
}
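
/*
 * Layout produced above: the first segment occupies len0/addr0 of the
 * ulptx_sgl itself; every subsequent pair of segments shares one 24B
 * ulptx_sge_pair (two 32-bit lengths followed by two 64-bit
 * addresses).  An odd trailing segment leaves half a pair, whose
 * unused length slot is zeroed so the hardware sees a clean end of
 * list.
 */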

/*
 * Max number of SGL entries an offload tx work request can have.  This is 41
 * (1 + 40) for a full 512B work request.
 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40)
 */
#define OFLD_SGL_LEN (41)

/*
 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
 * was sent out; transmission resumes from the mbuf after it, or from sb_mb if
 * nothing has been sent yet.
 *
 * drop indicates the number of bytes that should be dropped from the head of
 * the send buffer.  It is an optimization that lets do_fw4_ack avoid creating
 * contention on the send buffer lock (before this change it used to do
 * sowwakeup and then t4_push_frames right after that when recovering from tx
 * stalls).  When drop is set this function MUST drop the bytes and wake up any
 * writers.
 */
void
t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m, *sb_sndptr;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tx_credits, shove, compl, sowwakeup;
	struct ofld_tx_sdesc *txsd;
	bool aiotx_mbuf_seen;

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
	    toep->ulp_mode == ULP_MODE_TCPDDP ||
	    toep->ulp_mode == ULP_MODE_RDMA,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
	    __func__, toep->tid, toep->flags, tp->t_flags, drop);
#endif
	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

#ifdef RATELIMIT
	if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
	    (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
		inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
	}
#endif

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	txsd = &toep->txsd[toep->txsd_pidx];
	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}
		sb_sndptr = sb->sb_sndptr;
		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		aiotx_mbuf_seen = false;
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n;

			if (IS_AIOTX_MBUF(m))
				n = sglist_count_vmpages(aiotx_mbuf_pages(m),
				    aiotx_mbuf_pgoff(m), m->m_len);
			else
				n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, back out */
			if (plen > max_imm && nsegs > max_nsegs) {
				nsegs -= n;
				plen -= m->m_len;
				if (plen == 0) {
					/* Too few credits */
					toep->flags |= TPF_TX_SUSPENDED;
					if (sowwakeup) {
						if (!TAILQ_EMPTY(
						    &toep->aiotx_jobq))
							t4_aiotx_queue_toep(
							    toep);
						sowwakeup_locked(so);
					} else
						SOCKBUF_UNLOCK(sb);
					SOCKBUF_UNLOCK_ASSERT(sb);
					return;
				}
				break;
			}

			if (IS_AIOTX_MBUF(m))
				aiotx_mbuf_seen = true;
			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
			sb_sndptr = m;	/* new sb->sb_sndptr if all goes well */

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				m = m->m_next;
				break;
			}
		}

		if (sbused(sb) > sb->sb_hiwat * 5 / 8 &&
		    toep->plen_nocompl + plen >= sb->sb_hiwat / 4)
			compl = 1;
		else
			compl = 0;

		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup) {
			if (!TAILQ_EMPTY(&toep->aiotx_jobq))
				t4_aiotx_queue_toep(toep);
			sowwakeup_locked(so);
		} else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
		if (plen <= max_imm && !aiotx_mbuf_seen) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
					toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, plen, credits, shove, 0,
			    sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */

			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, plen, credits, shove, 0,
			    sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4)
			compl = 1;

		if (compl || toep->ulp_mode == ULP_MODE_RDMA) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += plen;
		tp->snd_max += plen;

		SOCKBUF_LOCK(sb);
		KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__));
		sb->sb_sndptr = sb_sndptr;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}
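
/*
 * Example of the DSGL work request sizing used above (illustrative):
 * for nsegs = 3 there are nsegs - 1 = 2 segments beyond the one held
 * by ulptx_sgl, so wr_len = 16 (txwr) + 16 (ulptx_sgl) +
 * ((3 * 2) / 2 + (2 & 1)) * 8 = 56 bytes, rounded up to 64 for the
 * work request.  Each full extra pair of segments costs 24 bytes and
 * an unpaired trailing segment costs 16 (12 used plus 4 of padding),
 * which is exactly what the expression encodes.
 */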

static inline void
rqdrop_locked(struct mbufq *q, int plen)
{
	struct mbuf *m;

	while (plen > 0) {
		m = mbufq_dequeue(q);

		/* Too many credits. */
		MPASS(m != NULL);
		M_ASSERTPKTHDR(m);

		/* Partial credits. */
		MPASS(plen >= m->m_pkthdr.len);

		plen -= m->m_pkthdr.len;
		m_freem(m);
	}
}

static void
t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	u_int adjusted_plen, ulp_submode;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	int tx_credits, shove;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
	struct mbufq *pduq = &toep->ulp_pduq;
	static const u_int ulp_extra_len[] = {0, 4, 4, 8};

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
	KASSERT(toep->ulp_mode == ULP_MODE_ISCSI,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	if (drop)
		rqdrop_locked(&toep->ulp_pdu_reclaimq, drop);

	while ((sndptr = mbufq_first(pduq)) != NULL) {
		M_ASSERTPKTHDR(sndptr);

		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/*
			 * This mbuf would send us _over_ the nsegs limit.
			 * Suspend tx because the PDU can't be sent out.
			 */
			if (plen > max_imm && nsegs > max_nsegs) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		/*
		 * We have a PDU to send.  All of it goes out in one WR so 'm'
		 * is NULL.  A PDU's length is always a multiple of 4.
		 */
		MPASS(m == NULL);
		MPASS((plen & 3) == 0);
		MPASS(sndptr->m_pkthdr.len == plen);

		shove = !(tp->t_flags & TF_MORETOCOME);
		ulp_submode = mbuf_ulp_submode(sndptr);
		MPASS(ulp_submode < nitems(ulp_extra_len));

		/*
		 * plen doesn't include header and data digests, which are
		 * generated and inserted in the right places by the TOE, but
		 * they do occupy TCP sequence space and need to be accounted
		 * for.
		 */
		adjusted_plen = plen + ulp_extra_len[ulp_submode];
		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
					toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */
			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
		    ("%s: not enough credits", __func__));

		m = mbufq_dequeue(pduq);
		MPASS(m == sndptr);
		mbufq_enqueue(&toep->ulp_pdu_reclaimq, m);

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += adjusted_plen;
		tp->snd_max += adjusted_plen;

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	}

	/* Send a FIN if requested, but only if there are no more PDUs to send */
	if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}
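
/*
 * The ulp_extra_len[] table above maps the iSCSI ulp_submode to the
 * TCP sequence space consumed by TOE-inserted digests: 0 for none, 4
 * for a header digest only, 4 for a data digest only, and 8 when both
 * are present (each CRC32C digest is 4 bytes).
 */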

int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	if (toep->ulp_mode == ULP_MODE_ISCSI)
		t4_push_pdus(sc, toep, 0);
	else
		t4_push_frames(sc, toep, 0);

	return (0);
}

int
t4_send_fin(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	toep->flags |= TPF_SEND_FIN;
	if (tp->t_state >= TCPS_ESTABLISHED) {
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_push_pdus(sc, toep, 0);
		else
			t4_push_frames(sc, toep, 0);
	}

	return (0);
}

int
t4_send_rst(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#if defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc for tid %u [%s] not sent already",
	    __func__, toep->tid, tcpstates[tp->t_state]));

	send_reset(sc, toep, 0);
	return (0);
}

/*
 * Peer has sent us a FIN.
 */
static int
do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_peer_close *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_PEER_CLOSE,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CURVNET_SET(toep->vnet);
	INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__,
	    tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	tp->rcv_nxt++;	/* FIN */

	so = inp->inp_socket;
	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		DDP_LOCK(toep);
		if (__predict_false(toep->ddp_flags &
		    (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)))
			handle_ddp_close(toep, tp, cpl->rcv_nxt);
		DDP_UNLOCK(toep);
	}
	socantrcvmore(so);

	if (toep->ulp_mode != ULP_MODE_RDMA) {
		KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
		    ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
		    be32toh(cpl->rcv_nxt)));
	}

	switch (tp->t_state) {
	case TCPS_SYN_RECEIVED:
		tp->t_starttime = ticks;
		/* FALLTHROUGH */

	case TCPS_ESTABLISHED:
		tp->t_state = TCPS_CLOSE_WAIT;
		break;

	case TCPS_FIN_WAIT_1:
		tp->t_state = TCPS_CLOSING;
		break;

	case TCPS_FIN_WAIT_2:
		tcp_twstart(tp);
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_RUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();

		INP_WLOCK(inp);
		final_cpl_received(toep);
		return (0);

	default:
		log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n",
		    __func__, tid, tp->t_state);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
	CURVNET_RESTORE();
	return (0);
}

/*
 * Peer has ACK'd our FIN.
 */
static int
do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_CLOSE_CON_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CURVNET_SET(toep->vnet);
	INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	so = inp->inp_socket;
	tp->snd_una = be32toh(cpl->snd_nxt) - 1;	/* exclude FIN */

	switch (tp->t_state) {
	case TCPS_CLOSING:	/* see TCPS_FIN_WAIT_2 in do_peer_close too */
		tcp_twstart(tp);
release:
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the inp */
		INP_INFO_RUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();

		INP_WLOCK(inp);
		final_cpl_received(toep);	/* no more CPLs expected */

		return (0);
	case TCPS_LAST_ACK:
		if (tcp_close(tp))
			INP_WUNLOCK(inp);
		goto release;

	case TCPS_FIN_WAIT_1:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			soisdisconnected(so);
		tp->t_state = TCPS_FIN_WAIT_2;
		break;

	default:
		log(LOG_ERR,
		    "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n",
		    __func__, tid, tcpstates[tp->t_state]);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
	CURVNET_RESTORE();
	return (0);
}

static void
send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
    int rst_status)
{
	struct wrqe *wr;
	struct cpl_abort_rpl *cpl;

	wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	cpl = wrtod(wr);

	INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid);
	cpl->cmd = rst_status;

	t4_wrq_tx(sc, wr);
}

static int
abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET);
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return (ETIMEDOUT);
	default:
		return (ECONNABORTED);
	}
}

/*
 * TCP RST from the peer, timeout, or some other such critical error.
 */
static int
do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct sge_wrq *ofld_txq = toep->ofld_txq;
	struct inpcb *inp;
	struct tcpcb *tp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_REQ_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_req_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	if (negative_advice(cpl->status)) {
		CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)",
		    __func__, cpl->status, tid, toep->flags);
		return (0);	/* Ignore negative advice */
	}

	inp = toep->inp;
	CURVNET_SET(toep->vnet);
	INP_INFO_RLOCK(&V_tcbinfo);	/* for tcp_close */
	INP_WLOCK(inp);

	tp = intotcpcb(inp);

	CTR6(KTR_CXGBE,
	    "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags,
	    inp->inp_flags, cpl->status);

	/*
	 * If we'd initiated an abort earlier the reply to it is responsible for
	 * cleaning up resources.  Otherwise we tear everything down right here
	 * right now.  We owe the T4 a CPL_ABORT_RPL no matter what.
	 */
	if (toep->flags & TPF_ABORT_SHUTDOWN) {
		INP_WUNLOCK(inp);
		goto done;
	}
	toep->flags |= TPF_ABORT_SHUTDOWN;

	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)
			so_error_set(so, abort_status_to_errno(tp,
			    cpl->status));
		tp = tcp_close(tp);
		if (tp == NULL)
			INP_WLOCK(inp);	/* re-acquire */
	}

	final_cpl_received(toep);
done:
	INP_INFO_RUNLOCK(&V_tcbinfo);
	CURVNET_RESTORE();
	send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
	return (0);
}

/*
 * Reply to the CPL_ABORT_REQ (send_reset)
 */
static int
do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_RPL_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	if (toep->flags & TPF_SYNQE)
		return (do_abort_rpl_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d",
	    __func__, tid, toep, inp, cpl->status);

	KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
	    ("%s: wasn't expecting abort reply", __func__));

	INP_WLOCK(inp);
	final_cpl_received(toep);

	return (0);
}

static int
do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int len;
	uint32_t ddp_placed = 0;

	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		m_freem(m);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	tp = intotcpcb(inp);

	if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq)))
		ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt;

	tp->rcv_nxt += len;
	if (tp->rcv_wnd < len) {
		KASSERT(toep->ulp_mode == ULP_MODE_RDMA,
		    ("%s: negative window size", __func__));
	}

	tp->rcv_wnd -= len;
	tp->t_rcvtime = ticks;

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		DDP_LOCK(toep);
	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, len);
		m_freem(m);
		SOCKBUF_UNLOCK(sb);
		if (toep->ulp_mode == ULP_MODE_TCPDDP)
			DDP_UNLOCK(toep);
		INP_WUNLOCK(inp);

		CURVNET_SET(toep->vnet);
		INP_INFO_RLOCK(&V_tcbinfo);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_RUNLOCK(&V_tcbinfo);
		CURVNET_RESTORE();

		return (0);
	}

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
		else
			toep->rx_credits += newsize - hiwat;
	}

	if (toep->ddp_waiting_count != 0 || toep->ddp_active_count != 0)
		CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)", __func__,
		    tid, len);

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off;

		if (changed) {
			if (toep->ddp_flags & DDP_SC_REQ)
				toep->ddp_flags ^= DDP_ON | DDP_SC_REQ;
			else {
				KASSERT(cpl->ddp_off == 1,
				    ("%s: DDP switched on by itself.",
				    __func__));

				/* Fell out of DDP mode */
				toep->ddp_flags &= ~DDP_ON;
				CTR1(KTR_CXGBE, "%s: fell out of DDP mode",
				    __func__);

				insert_ddp_data(toep, ddp_placed);
			}
		}

		if (toep->ddp_flags & DDP_ON) {
			/*
			 * CPL_RX_DATA with DDP on can only be an indicate.
			 * Start posting queued AIO requests via DDP.  The
			 * payload that arrived in this indicate is appended
			 * to the socket buffer as usual.
			 */
			handle_ddp_indicate(toep);
		}
	}

	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sbused(sb);
	sbappendstream_locked(sb, m, 0);
	toep->sb_cc = sbused(sb);
	if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) {
		int credits;

		credits = send_rx_credits(sc, toep, toep->rx_credits);
		toep->rx_credits -= credits;
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}

	if (toep->ddp_waiting_count > 0 && sbavail(sb) != 0) {
		CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__,
		    tid);
		ddp_queue_toep(toep);
	}
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		DDP_UNLOCK(toep);

	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}

#define S_CPL_FW4_ACK_OPCODE    24
#define M_CPL_FW4_ACK_OPCODE    0xff
#define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE)
#define G_CPL_FW4_ACK_OPCODE(x) \
    (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE)

#define S_CPL_FW4_ACK_FLOWID    0
#define M_CPL_FW4_ACK_FLOWID    0xffffff
#define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID)
#define G_CPL_FW4_ACK_FLOWID(x) \
    (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID)

#define S_CPL_FW4_ACK_CR        24
#define M_CPL_FW4_ACK_CR        0xff
#define V_CPL_FW4_ACK_CR(x)     ((x) << S_CPL_FW4_ACK_CR)
#define G_CPL_FW4_ACK_CR(x)     (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR)

#define S_CPL_FW4_ACK_SEQVAL    0
#define M_CPL_FW4_ACK_SEQVAL    0x1
#define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL)
#define G_CPL_FW4_ACK_SEQVAL(x) \
    (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL)
#define F_CPL_FW4_ACK_SEQVAL    V_CPL_FW4_ACK_SEQVAL(1U)
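
/*
 * These follow the driver-wide S_/M_/V_/G_ convention: S_* is the bit
 * shift, M_* the field mask, V_*(x) places a value into the field, and
 * G_*(x) extracts it.  For example,
 * G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))) recovers the 24-bit
 * tid carried in a CPL_FW4_ACK, as done in do_fw4_ack() below.
 */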

static int
do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp;
	struct tcpcb *tp;
	struct socket *so;
	uint8_t credits = cpl->credits;
	struct ofld_tx_sdesc *txsd;
	int plen;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	/*
	 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and
	 * now this comes back carrying the credits for the flowc.
	 */
	if (__predict_false(toep->flags & TPF_SYNQE)) {
		KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
		    ("%s: credits for a synq entry %p", __func__, toep));
		return (0);
	}

	inp = toep->inp;
	KASSERT(opcode == CPL_FW4_ACK,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_WLOCK(inp);

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) {
		INP_WUNLOCK(inp);
		return (0);
	}

	KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0,
	    ("%s: inp_flags 0x%x", __func__, inp->inp_flags));

	tp = intotcpcb(inp);

	if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
		tcp_seq snd_una = be32toh(cpl->snd_una);

#ifdef INVARIANTS
		if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) {
			log(LOG_ERR,
			    "%s: unexpected seq# %x for TID %u, snd_una %x\n",
			    __func__, snd_una, toep->tid, tp->snd_una);
		}
#endif

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->ts_recent_age = tcp_ts_getticks();
		}
	}

#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: tid %d credits %u", __func__, tid, credits);
#endif
	so = inp->inp_socket;
	txsd = &toep->txsd[toep->txsd_cidx];
	plen = 0;
	while (credits) {
		KASSERT(credits >= txsd->tx_credits,
		    ("%s: too many (or partial) credits", __func__));
		credits -= txsd->tx_credits;
		toep->tx_credits += txsd->tx_credits;
		plen += txsd->plen;
		txsd++;
		toep->txsd_avail++;
		KASSERT(toep->txsd_avail <= toep->txsd_total,
		    ("%s: txsd avail > total", __func__));
		if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
			txsd = &toep->txsd[0];
			toep->txsd_cidx = 0;
		}
	}

	if (toep->tx_credits == toep->tx_total) {
		toep->tx_nocompl = 0;
		toep->plen_nocompl = 0;
	}

	if (toep->flags & TPF_TX_SUSPENDED &&
	    toep->tx_credits >= toep->tx_total / 4) {
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: tid %d calling t4_push_frames", __func__,
		    tid);
#endif
		toep->flags &= ~TPF_TX_SUSPENDED;
		CURVNET_SET(toep->vnet);
		if (toep->ulp_mode == ULP_MODE_ISCSI)
			t4_push_pdus(sc, toep, plen);
		else
			t4_push_frames(sc, toep, plen);
		CURVNET_RESTORE();
	} else if (plen > 0) {
		struct sockbuf *sb = &so->so_snd;
		int sbu;

		SOCKBUF_LOCK(sb);
		sbu = sbused(sb);
		if (toep->ulp_mode == ULP_MODE_ISCSI) {

			if (__predict_false(sbu > 0)) {
				/*
				 * The data transmitted before the tid's ULP
				 * mode changed to ISCSI is still in so_snd.
				 * Incoming credits should account for so_snd
				 * first.
				 */
				sbdrop_locked(sb, min(sbu, plen));
				plen -= min(sbu, plen);
			}
			sowwakeup_locked(so);	/* unlocks so_snd */
			rqdrop_locked(&toep->ulp_pdu_reclaimq, plen);
		} else {
#ifdef VERBOSE_TRACES
			CTR3(KTR_CXGBE, "%s: tid %d dropped %d bytes", __func__,
			    tid, plen);
#endif
			sbdrop_locked(sb, plen);
			if (!TAILQ_EMPTY(&toep->aiotx_jobq))
				t4_aiotx_queue_toep(toep);
			sowwakeup_locked(so);	/* unlocks so_snd */
		}
		SOCKBUF_UNLOCK_ASSERT(sb);
	}

	INP_WUNLOCK(inp);

	return (0);
}

static int
do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_SET_TCB_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	MPASS(iq != &sc->sge.fwq);

	toep = lookup_tid(sc, tid);
	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		handle_ddp_tcb_rpl(toep, cpl);
		return (0);
	}

	/*
	 * TOM and/or other ULPs don't request replies for CPL_SET_TCB or
	 * CPL_SET_TCB_FIELD requests.  This can easily change and when it does
	 * the dispatch code will go here.
	 */
#ifdef INVARIANTS
	panic("%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p", __func__,
	    tid, iq);
#else
	log(LOG_ERR, "%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p\n",
	    __func__, tid, iq);
#endif

	return (0);
}

void
t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, int tid,
    uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie, int iqid)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;

	MPASS((cookie & ~M_COOKIE) == 0);
	MPASS((iqid & ~M_QUEUENO) == 0);

	wr = alloc_wrqe(sizeof(*req), wrq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
	req->reply_ctrl = htobe16(V_QUEUENO(iqid));
	if (reply == 0)
		req->reply_ctrl |= htobe16(F_NO_REPLY);
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	t4_wrq_tx(sc, wr);
}

void
t4_init_cpl_io_handlers(void)
{

	t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
	t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
	t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl);
	t4_register_cpl_handler(CPL_RX_DATA, do_rx_data);
	t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack);
}

void
t4_uninit_cpl_io_handlers(void)
{

	t4_register_cpl_handler(CPL_PEER_CLOSE, NULL);
	t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL);
	t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL);
	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, NULL);
	t4_register_cpl_handler(CPL_RX_DATA, NULL);
	t4_register_cpl_handler(CPL_FW4_ACK, NULL);
}

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * sent by the AIO job so far and the 'backend4' field to hold an
 * error that should be reported when the job is completed.
 */
#define aio_sent	backend3
#define aio_error	backend4

#define jobtotid(job)							\
	(((struct toepcb *)(so_sototcpcb((job)->fd_file->f_data)->t_toe))->tid)

static void
free_aiotx_buffer(struct aiotx_buffer *ab)
{
	struct kaiocb *job;
	long status;
	int error;

	if (refcount_release(&ab->refcount) == 0)
		return;

	job = ab->job;
	error = job->aio_error;
	status = job->aio_sent;
	vm_page_unhold_pages(ab->ps.pages, ab->ps.npages);
	free(ab, M_CXGBE);
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d completed %p len %ld, error %d", __func__,
	    jobtotid(job), job, status, error);
#endif
	if (error == ECANCELED && status != 0)
		error = 0;
	if (error == ECANCELED)
		aio_cancel(job);
	else if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, status, 0);
}

static void
t4_aiotx_mbuf_free(struct mbuf *m)
{
	struct aiotx_buffer *ab = m->m_ext.ext_arg1;

#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: completed %d bytes for tid %d", __func__,
	    m->m_len, jobtotid(ab->job));
#endif
	free_aiotx_buffer(ab);
}

/*
 * Hold the buffer backing an AIO request and return an AIO transmit
 * buffer.
 */
static int
hold_aio(struct kaiocb *job)
{
	struct aiotx_buffer *ab;
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t start, end, pgoff;
	int n;

	MPASS(job->backend1 == NULL);

	/*
	 * The AIO subsystem will cancel and drain all requests before
	 * permitting a process to exit or exec, so p_vmspace should
	 * be stable here.
	 */
	vm = job->userproc->p_vmspace;
	map = &vm->vm_map;
	start = (uintptr_t)job->uaiocb.aio_buf;
	pgoff = start & PAGE_MASK;
	end = round_page(start + job->uaiocb.aio_nbytes);
	start = trunc_page(start);
	n = atop(end - start);

	ab = malloc(sizeof(*ab) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
	    M_ZERO);
	refcount_init(&ab->refcount, 1);
	ab->ps.pages = (vm_page_t *)(ab + 1);
	ab->ps.npages = vm_fault_quick_hold_pages(map, start, end - start,
	    VM_PROT_WRITE, ab->ps.pages, n);
	if (ab->ps.npages < 0) {
		free(ab, M_CXGBE);
		return (EFAULT);
	}

	KASSERT(ab->ps.npages == n,
	    ("hold_aio: page count mismatch: %d vs %d", ab->ps.npages, n));

	ab->ps.offset = pgoff;
	ab->ps.len = job->uaiocb.aio_nbytes;
	job->backend1 = ab;
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
	    __func__, jobtotid(job), &ab->ps, job, ab->ps.npages);
#endif
	return (0);
}
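
/*
 * Example of the page arithmetic above (illustrative, 4KB pages): an
 * aio_buf of 0x10ff0 with aio_nbytes = 0x30 gives pgoff = 0xff0,
 * start = trunc_page(0x10ff0) = 0x10000, and end =
 * round_page(0x11020) = 0x12000, so n = 2 pages are wired even though
 * the request is only 48 bytes, because it straddles a page boundary.
 */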

static void
t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
{
	struct adapter *sc;
	struct sockbuf *sb;
	struct file *fp;
	struct aiotx_buffer *ab;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct mbuf *m;
	int error;
	bool moretocome, sendmore;

	sc = td_adapter(toep->td);
	sb = &so->so_snd;
	SOCKBUF_UNLOCK(sb);
	fp = job->fd_file;
	ab = job->backend1;
	m = NULL;
#ifdef MAC
	error = mac_socket_check_send(fp->f_cred, so);
	if (error != 0)
		goto out;
#endif
	if (ab == NULL) {
		error = hold_aio(job);
		if (error != 0)
			goto out;
		ab = job->backend1;
	}

	/* Inline sosend_generic(). */

	job->msgsnd = 1;
	error = sblock(sb, SBL_WAIT);
	MPASS(error == 0);

sendanother:
	m = m_get(M_WAITOK, MT_DATA);

	SOCKBUF_LOCK(sb);
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		if ((so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
		error = EPIPE;
		goto out;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		error = ENOTCONN;
		goto out;
	}
	if (sbspace(sb) < sb->sb_lowat) {
		MPASS(job->aio_sent == 0 || !(so->so_state & SS_NBIO));

		/*
		 * Don't block if there is too little room in the socket
		 * buffer.  Instead, requeue the request.
		 */
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			sbunlock(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		SOCKBUF_UNLOCK(sb);
		sbunlock(sb);
		goto out;
	}

	/*
	 * Write as much data as the socket permits, but no more than a
	 * single sndbuf at a time.
	 */
	m->m_len = sbspace(sb);
	if (m->m_len > ab->ps.len - job->aio_sent) {
		m->m_len = ab->ps.len - job->aio_sent;
		moretocome = false;
	} else
		moretocome = true;
	if (m->m_len > sc->tt.sndbuf) {
		m->m_len = sc->tt.sndbuf;
		sendmore = true;
	} else
		sendmore = false;

	if (!TAILQ_EMPTY(&toep->aiotx_jobq))
		moretocome = true;
	SOCKBUF_UNLOCK(sb);
	MPASS(m->m_len != 0);

	/* Inlined tcp_usr_send(). */

	inp = toep->inp;
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		sbunlock(sb);
		error = ECONNRESET;
		goto out;
	}

	refcount_acquire(&ab->refcount);
	m_extadd(m, NULL, ab->ps.len, t4_aiotx_mbuf_free, ab,
	    (void *)(uintptr_t)job->aio_sent, 0, EXT_NET_DRV);
	m->m_ext.ext_flags |= EXT_FLAG_AIOTX;
	job->aio_sent += m->m_len;

	sbappendstream(sb, m, 0);
	m = NULL;

	if (!(inp->inp_flags & INP_DROPPED)) {
		tp = intotcpcb(inp);
		if (moretocome)
			tp->t_flags |= TF_MORETOCOME;
		error = tp->t_fb->tfb_tcp_output(tp);
		if (moretocome)
			tp->t_flags &= ~TF_MORETOCOME;
	}

	INP_WUNLOCK(inp);
	if (sendmore)
		goto sendanother;
	sbunlock(sb);

	if (error)
		goto out;

	/*
	 * If this is a non-blocking socket and the request has not
	 * been fully completed, requeue it until the socket is ready
	 * again.
	 */
	if (job->aio_sent < job->uaiocb.aio_nbytes &&
	    !(so->so_state & SS_NBIO)) {
		SOCKBUF_LOCK(sb);
		if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
			SOCKBUF_UNLOCK(sb);
			error = ECANCELED;
			goto out;
		}
		TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
		SOCKBUF_UNLOCK(sb);
		goto out;
	}

	/*
	 * If the request will not be requeued, drop a reference on
	 * the aiotx buffer.  Any mbufs in flight should still
	 * contain a reference, but this drops the reference that the
	 * job owns while it is waiting to queue mbufs to the socket.
	 */
	free_aiotx_buffer(ab);

out:
	if (error) {
		if (ab != NULL) {
			job->aio_error = error;
			free_aiotx_buffer(ab);
		} else {
			MPASS(job->aio_sent == 0);
			aio_complete(job, -1, error);
		}
	}
	if (m != NULL)
		m_free(m);
	SOCKBUF_LOCK(sb);
}

static void
t4_aiotx_task(void *context, int pending)
{
	struct toepcb *toep = context;
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct kaiocb *job;

	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(&so->so_snd);
	while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) {
		job = TAILQ_FIRST(&toep->aiotx_jobq);
		TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
		if (!aio_clear_cancel_function(job))
			continue;

		t4_aiotx_process_job(toep, so, job);
	}
	toep->aiotx_task_active = false;
	SOCKBUF_UNLOCK(&so->so_snd);
	CURVNET_RESTORE();

	free_toepcb(toep);
}

static void
t4_aiotx_queue_toep(struct toepcb *toep)
{

	SOCKBUF_LOCK_ASSERT(&toep->inp->inp_socket->so_snd);
#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: queueing aiotx task for tid %d, active = %s",
	    __func__, toep->tid, toep->aiotx_task_active ? "true" : "false");
#endif
	if (toep->aiotx_task_active)
		return;
	toep->aiotx_task_active = true;
	hold_toepcb(toep);
	soaio_enqueue(&toep->aiotx_task);
}

static void
t4_aiotx_cancel(struct kaiocb *job)
{
	struct aiotx_buffer *ab;
	struct socket *so;
	struct sockbuf *sb;
	struct tcpcb *tp;
	struct toepcb *toep;

	so = job->fd_file->f_data;
	tp = so_sototcpcb(so);
	toep = tp->t_toe;
	MPASS(job->uaiocb.aio_lio_opcode == LIO_WRITE);
	sb = &so->so_snd;

	SOCKBUF_LOCK(sb);
	if (!aio_cancel_cleared(job))
		TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
	SOCKBUF_UNLOCK(sb);

	ab = job->backend1;
	if (ab != NULL) {
		job->aio_error = ECANCELED;
		free_aiotx_buffer(ab);
	} else
		aio_cancel(job);
}

int
t4_aio_queue_aiotx(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);

	/* This only handles writes. */
	if (job->uaiocb.aio_lio_opcode != LIO_WRITE)
		return (EOPNOTSUPP);

	if (!sc->tt.tx_zcopy)
		return (EOPNOTSUPP);

	SOCKBUF_LOCK(&so->so_snd);
#ifdef VERBOSE_TRACES
	CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job);
#endif
	if (!aio_set_cancel_function(job, t4_aiotx_cancel))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list);
	if (sowriteable(so))
		t4_aiotx_queue_toep(toep);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);
}

void
aiotx_init_toep(struct toepcb *toep)
{

	TAILQ_INIT(&toep->aiotx_jobq);
	TASK_INIT(&toep->aiotx_task, 0, t4_aiotx_task, toep);
}
#endif