/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sglist.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/toecore.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

VNET_DECLARE(int, tcp_do_autosndbuf);
#define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf)
VNET_DECLARE(int, tcp_autosndbuf_inc);
#define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc)
VNET_DECLARE(int, tcp_autosndbuf_max);
#define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max)
VNET_DECLARE(int, tcp_do_autorcvbuf);
#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
VNET_DECLARE(int, tcp_autorcvbuf_inc);
#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)

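/*
 * Program the chip's per-connection flow parameters with a FW_FLOWC_WR.
 * When ftxp is NULL only the basic parameters are sent, along with a token
 * send buffer size and MSS of 512 bytes.
 */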
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
struct fw_flowc_wr *flowc;
unsigned int nparams = ftxp ? 8 : 6, flowclen;
struct port_info *pi = toep->port;
struct adapter *sc = pi->adapter;
unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN;
struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
("%s: flowc for tid %u sent already", __func__, toep->tid));
CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
panic("%s: allocation failure.", __func__);
memset(flowc, 0, wr->wr_len);
flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
V_FW_FLOWC_WR_NPARAMS(nparams));
flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
V_FW_WR_FLOWID(toep->tid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htobe32(pfvf);
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
flowc->mnemval[1].val = htobe32(pi->tx_chan);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
flowc->mnemval[2].val = htobe32(pi->tx_chan);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id);
uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
flowc->mnemval[4].val = htobe32(ftxp->snd_nxt);
flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt);
flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
flowc->mnemval[6].val = htobe32(sndbuf);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = htobe32(ftxp->mss);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
flowc->mnemval[4].val = htobe32(512);
flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[5].val = htobe32(512);
txsd->tx_credits = howmany(flowclen, 16);
KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
("%s: not enough credits (%d)", __func__, toep->tx_credits));
toep->tx_credits -= txsd->tx_credits;
if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
toep->flags |= TPF_FLOWC_WR_SENT;

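/*
 * Abort the connection: send a CPL_ABORT_REQ asking the chip to send an RST
 * to the peer.  snd_nxt is used as the abort's sequence number if the inp has
 * already been dropped; otherwise tp->snd_nxt is used.
 */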
send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
struct cpl_abort_req *req;
struct inpcb *inp = toep->inp;
struct tcpcb *tp = intotcpcb(inp); /* don't use if INP_DROPPED */
INP_WLOCK_ASSERT(inp);
CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
inp->inp_flags & INP_DROPPED ? "inp dropped" :
tcpstates[tp->t_state],
toep->flags, inp->inp_flags,
toep->flags & TPF_ABORT_SHUTDOWN ?
" (abort already in progress)" : "");
if (toep->flags & TPF_ABORT_SHUTDOWN)
return; /* abort already in progress */
toep->flags |= TPF_ABORT_SHUTDOWN;
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
("%s: flowc_wr not sent for tid %d.", __func__, tid));
wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
panic("%s: allocation failure.", __func__);
INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
if (inp->inp_flags & INP_DROPPED)
req->rsvd0 = htobe32(snd_nxt);
req->rsvd0 = htobe32(tp->snd_nxt);
req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT);
req->cmd = CPL_ABORT_SEND_RST;
 * XXX: What's the correct way to tell that the inp hasn't been detached
 * from its socket? Should I even be flushing the snd buffer here?
if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
struct socket *so = inp->inp_socket;
if (so != NULL) /* because I'm not sure. See comment above */
sbflush(&so->so_snd);
t4_l2t_send(sc, wr, toep->l2te);

 * Called when a connection is established to translate the TCP options
 * reported by HW to FreeBSD's native format.
assign_rxopt(struct tcpcb *tp, unsigned int opt)
struct toepcb *toep = tp->t_toe;
struct adapter *sc = td_adapter(toep->td);
INP_LOCK_ASSERT(tp->t_inpcb);
tp->t_maxseg = tp->t_maxopd = sc->params.mtus[G_TCPOPT_MSS(opt)] - 40;
if (G_TCPOPT_TSTAMP(opt)) {
tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */
tp->ts_recent = 0; /* hmmm */
tp->ts_recent_age = tcp_ts_getticks();
tp->t_maxseg -= TCPOLEN_TSTAMP_APPA;
if (G_TCPOPT_SACK(opt))
tp->t_flags |= TF_SACK_PERMIT; /* should already be set */
tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */
if (G_TCPOPT_WSCALE_OK(opt))
tp->t_flags |= TF_RCVD_SCALE;
/* Doing window scaling? */
if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
(TF_RCVD_SCALE | TF_REQ_SCALE)) {
tp->rcv_scale = tp->request_r_scale;
tp->snd_scale = G_TCPOPT_SND_WSCALE(opt);

 * Completes some final bits of initialization for just established connections
 * and changes their state to TCPS_ESTABLISHED.
 * The ISNs are from after the exchange of SYNs.  i.e., the true ISN + 1.
make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
struct inpcb *inp = toep->inp;
struct socket *so = inp->inp_socket;
struct tcpcb *tp = intotcpcb(inp);
uint32_t iss = be32toh(snd_isn) - 1; /* true ISS */
uint32_t irs = be32toh(rcv_isn) - 1; /* true IRS */
uint16_t tcpopt = be16toh(opt);
struct flowc_tx_params ftxp;
INP_WLOCK_ASSERT(inp);
KASSERT(tp->t_state == TCPS_SYN_SENT ||
tp->t_state == TCPS_SYN_RECEIVED,
("%s: TCP state %s", __func__, tcpstates[tp->t_state]));
CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p",
__func__, toep->tid, toep, inp);
tp->t_state = TCPS_ESTABLISHED;
tp->t_starttime = ticks;
TCPSTAT_INC(tcps_connects);
tp->rcv_wnd = toep->rx_credits << 10;
tp->rcv_adv += tp->rcv_wnd;
tp->last_ack_sent = tp->rcv_nxt;
 * If we were unable to send all rx credits via opt0, save the remainder
 * in rx_credits so that they can be handed over with the next credit
 * update.
SOCKBUF_LOCK(&so->so_rcv);
bufsize = select_rcv_wnd(so);
SOCKBUF_UNLOCK(&so->so_rcv);
toep->rx_credits = bufsize - tp->rcv_wnd;
tp->snd_una = iss + 1;
tp->snd_nxt = iss + 1;
tp->snd_max = iss + 1;
assign_rxopt(tp, tcpopt);
SOCKBUF_LOCK(&so->so_snd);
if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
bufsize = V_tcp_autosndbuf_max;
bufsize = sbspace(&so->so_snd);
SOCKBUF_UNLOCK(&so->so_snd);
ftxp.snd_nxt = tp->snd_nxt;
ftxp.rcv_nxt = tp->rcv_nxt;
ftxp.snd_space = bufsize;
ftxp.mss = tp->t_maxseg;
send_flowc_wr(toep, &ftxp);

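/*
 * Return rx credits to the chip with a CPL_RX_DATA_ACK so that it can open
 * the receive window back up.
 */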
send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits)
struct cpl_rx_data_ack *req;
uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
KASSERT(credits >= 0, ("%s: %d credits", __func__, credits));
wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
req->credit_dack = htobe32(dack | V_RX_CREDITS(credits));

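/*
 * The TOE's rcvd hook: the application has consumed data from the socket
 * buffer, so work out how many rx credits that frees up and hand them back
 * to the chip once there are enough to be worth sending.
 */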
t4_rcvd(struct toedev *tod, struct tcpcb *tp)
struct adapter *sc = tod->tod_softc;
struct inpcb *inp = tp->t_inpcb;
struct socket *so = inp->inp_socket;
struct sockbuf *sb = &so->so_rcv;
struct toepcb *toep = tp->t_toe;
INP_WLOCK_ASSERT(inp);
KASSERT(toep->sb_cc >= sb->sb_cc,
("%s: sb %p has more data (%d) than last time (%d).",
__func__, sb, sb->sb_cc, toep->sb_cc));
toep->rx_credits += toep->sb_cc - sb->sb_cc;
toep->sb_cc = sb->sb_cc;
credits = toep->rx_credits;
(credits + 16384 >= tp->rcv_wnd || credits >= 15 * 1024)) {
credits = send_rx_credits(sc, toep, credits);
toep->rx_credits -= credits;
tp->rcv_wnd += credits;
tp->rcv_adv += credits;

 * Close a connection by sending a CPL_CLOSE_CON_REQ message.
close_conn(struct adapter *sc, struct toepcb *toep)
struct cpl_close_con_req *req;
unsigned int tid = toep->tid;
CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");
if (toep->flags & TPF_FIN_SENT)
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
("%s: flowc_wr not sent for tid %u.", __func__, tid));
wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
panic("%s: allocation failure.", __func__);
req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
V_FW_WR_FLOWID(tid));
req->wr.wr_lo = cpu_to_be64(0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
toep->flags |= TPF_FIN_SENT;
toep->flags &= ~TPF_SEND_FIN;
t4_l2t_send(sc, wr, toep->l2te);

#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16))

/* Maximum amount of immediate data we could stuff in a WR */
max_imm_payload(int tx_credits)
const int n = 2; /* Use only up to 2 desc for imm. data WR */
KASSERT(tx_credits >= 0 &&
tx_credits <= MAX_OFLD_TX_CREDITS,
("%s: %d credits", __func__, tx_credits));
if (tx_credits < MIN_OFLD_TX_CREDITS)
if (tx_credits >= (n * EQ_ESIZE) / 16)
return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr));
return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr));

/* Maximum number of SGL entries we could stuff in a WR */
max_dsgl_nsegs(int tx_credits)
int nseg = 1; /* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */
int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS;
KASSERT(tx_credits >= 0 &&
tx_credits <= MAX_OFLD_TX_CREDITS,
("%s: %d credits", __func__, tx_credits));
if (tx_credits < MIN_OFLD_TX_CREDITS)
nseg += 2 * (sge_pair_credits * 16 / 24);
if ((sge_pair_credits * 16) % 24 == 16)

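/*
 * Fill in an offload tx data work request header (fw_ofld_tx_data_wr) at dst.
 * immdlen is the amount of immediate data carried in the WR, plen the total
 * payload length, and credits the size of the WR in 16-byte units.
 */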
write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
unsigned int plen, uint8_t credits, int more_to_come)
struct fw_ofld_tx_data_wr *txwr = dst;
int shove = !more_to_come;
 * We always request completion notifications from the firmware. The
 * only exception is when we know we'll get more data to send shortly
 * and that we'll have some tx credits remaining to transmit that data.
if (more_to_come && toep->tx_credits - credits >= MIN_OFLD_TX_CREDITS)
txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) |
V_FW_WR_COMPL(compl) | V_FW_WR_IMMDLEN(immdlen));
txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
V_FW_WR_LEN16(credits));
txwr->tunnel_to_proxy =
htobe32(V_FW_OFLD_TX_DATA_WR_ULPMODE(toep->ulp_mode) |
V_FW_OFLD_TX_DATA_WR_URGENT(0) | /* XXX */
V_FW_OFLD_TX_DATA_WR_SHOVE(shove));
txwr->plen = htobe32(plen);

 * Generate a DSGL from a starting mbuf.  The total number of segments and the
 * maximum segments in any one mbuf are provided.
write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
struct ulptx_sgl *usgl = dst;
struct sglist_seg segs[n];
KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));
sglist_init(&sg, n, segs);
usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
V_ULPTX_NSGE(nsegs));
for (m = start; m != stop; m = m->m_next) {
rc = sglist_append(&sg, mtod(m, void *), m->m_len);
if (__predict_false(rc != 0))
panic("%s: sglist_append %d", __func__, rc);
for (j = 0; j < sg.sg_nseg; i++, j++) {
usgl->len0 = htobe32(segs[j].ss_len);
usgl->addr0 = htobe64(segs[j].ss_paddr);
usgl->sge[i / 2].len[i & 1] =
htobe32(segs[j].ss_len);
usgl->sge[i / 2].addr[i & 1] =
htobe64(segs[j].ss_paddr);
usgl->sge[i / 2].len[1] = htobe32(0);
KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p",
__func__, nsegs, start, stop));

 * Max number of SGL entries an offload tx work request can have. This is 41
 * (1 + 40) for a full 512B work request.
 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40)
#define OFLD_SGL_LEN (41)

 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
 * was transmitted.
t4_push_frames(struct adapter *sc, struct toepcb *toep)
struct mbuf *sndptr, *m, *sb_sndptr;
struct fw_ofld_tx_data_wr *txwr;
unsigned int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
struct inpcb *inp = toep->inp;
struct tcpcb *tp = intotcpcb(inp);
struct socket *so = inp->inp_socket;
struct sockbuf *sb = &so->so_snd;
struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
INP_WLOCK_ASSERT(inp);
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
if (__predict_false(toep->ulp_mode != ULP_MODE_NONE &&
toep->ulp_mode != ULP_MODE_TCPDDP))
CXGBE_UNIMPLEMENTED("ulp_mode");
 * This function doesn't resume by itself. Someone else must clear the
 * flag and call this function.
if (__predict_false(toep->flags & TPF_TX_SUSPENDED))
tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
max_imm = max_imm_payload(tx_credits);
max_nsegs = max_dsgl_nsegs(tx_credits);
sb_sndptr = sb->sb_sndptr;
sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
for (m = sndptr; m != NULL; m = m->m_next) {
int n = sglist_count(mtod(m, void *), m->m_len);
/* This mbuf sent us _over_ the nsegs limit, back out */
if (plen > max_imm && nsegs > max_nsegs) {
/* Too few credits */
toep->flags |= TPF_TX_SUSPENDED;
if (max_nsegs_1mbuf < n)
sb_sndptr = m; /* new sb->sb_sndptr if all goes well */
/* This mbuf put us right at the max_nsegs limit */
if (plen > max_imm && nsegs == max_nsegs) {
if (sb->sb_flags & SB_AUTOSIZE &&
V_tcp_do_autosndbuf &&
sb->sb_hiwat < V_tcp_autosndbuf_max &&
sbspace(sb) < sb->sb_hiwat / 8 * 7) {
int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
V_tcp_autosndbuf_max);
if (!sbreserve_locked(sb, newsize, so, NULL))
sb->sb_flags &= ~SB_AUTOSIZE;
sowwakeup_locked(so); /* room available */
SOCKBUF_UNLOCK_ASSERT(sb);
/* nothing to send */
("%s: nothing to send, but m != NULL", __func__));
if (__predict_false(toep->flags & TPF_FIN_SENT))
panic("%s: excess tx.", __func__);
if (plen <= max_imm) {
/* Immediate data tx */
wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
/* XXX: how will we recover from this? */
toep->flags |= TPF_TX_SUSPENDED;
credits = howmany(wr->wr_len, 16);
write_tx_wr(txwr, toep, plen, plen, credits,
tp->t_flags & TF_MORETOCOME);
m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
/* XXX: how will we recover from this? */
toep->flags |= TPF_TX_SUSPENDED;
credits = howmany(wr_len, 16);
write_tx_wr(txwr, toep, 0, plen, credits,
tp->t_flags & TF_MORETOCOME);
write_tx_sgl(txwr + 1, sndptr, m, nsegs,
uint64_t *pad = (uint64_t *)
((uintptr_t)txwr + wr_len);
KASSERT(toep->tx_credits >= credits,
("%s: not enough credits", __func__));
toep->tx_credits -= credits;
KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__));
sb->sb_sndptr = sb_sndptr;
toep->flags |= TPF_TX_DATA_SENT;
KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
txsd->tx_credits = credits;
if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
txsd = &toep->txsd[0];
t4_l2t_send(sc, wr, toep->l2te);
/* Send a FIN if requested, but only if there's no more data to send */
if (m == NULL && toep->flags & TPF_SEND_FIN)
close_conn(sc, toep);

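/*
 * TOE output hook: the kernel's TCP stack has queued new data in the socket's
 * send buffer for the chip to transmit.
 */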
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
struct adapter *sc = tod->tod_softc;
struct inpcb *inp = tp->t_inpcb;
struct toepcb *toep = tp->t_toe;
INP_WLOCK_ASSERT(inp);
KASSERT((inp->inp_flags & INP_DROPPED) == 0,
("%s: inp %p dropped.", __func__, inp));
KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
t4_push_frames(sc, toep);

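/* TOE hook: send a FIN to the peer, after any data still queued in so_snd. */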
t4_send_fin(struct toedev *tod, struct tcpcb *tp)
struct adapter *sc = tod->tod_softc;
struct inpcb *inp = tp->t_inpcb;
struct toepcb *toep = tp->t_toe;
INP_WLOCK_ASSERT(inp);
KASSERT((inp->inp_flags & INP_DROPPED) == 0,
("%s: inp %p dropped.", __func__, inp));
KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
toep->flags |= TPF_SEND_FIN;
t4_push_frames(sc, toep);

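/* TOE hook: abort the connection by sending an RST to the peer. */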
t4_send_rst(struct toedev *tod, struct tcpcb *tp)
struct adapter *sc = tod->tod_softc;
#if defined(INVARIANTS)
struct inpcb *inp = tp->t_inpcb;
struct toepcb *toep = tp->t_toe;
INP_WLOCK_ASSERT(inp);
KASSERT((inp->inp_flags & INP_DROPPED) == 0,
("%s: inp %p dropped.", __func__, inp));
KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
("%s: flowc for tid %u [%s] not sent already",
__func__, toep->tid, tcpstates[tp->t_state]));
send_reset(sc, toep, 0);

 * Peer has sent us a FIN.
do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
struct adapter *sc = iq->adapter;
const struct cpl_peer_close *cpl = (const void *)(rss + 1);
unsigned int tid = GET_TID(cpl);
struct toepcb *toep = lookup_tid(sc, tid);
struct inpcb *inp = toep->inp;
struct tcpcb *tp = NULL;
unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
KASSERT(opcode == CPL_PEER_CLOSE,
("%s: unexpected opcode 0x%x", __func__, opcode));
KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
if (__predict_false(toep->flags & TPF_SYNQE)) {
struct synq_entry *synqe = (void *)toep;
INP_WLOCK(synqe->lctx->inp);
if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
("%s: listen socket closed but tid %u not aborted.",
 * do_pass_accept_req is still running and will
 * eventually take care of this tid.
INP_WUNLOCK(synqe->lctx->inp);
CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
INP_INFO_WLOCK(&V_tcbinfo);
CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__,
tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp);
if (toep->flags & TPF_ABORT_SHUTDOWN)
tp->rcv_nxt++; /* FIN */
so = inp->inp_socket;
if (__predict_false(toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) {
m = get_ddp_mbuf(be32toh(cpl->rcv_nxt) - tp->rcv_nxt);
tp->rcv_nxt = be32toh(cpl->rcv_nxt);
toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);
KASSERT(toep->sb_cc >= sb->sb_cc,
("%s: sb %p has more data (%d) than last time (%d).",
__func__, sb, sb->sb_cc, toep->sb_cc));
toep->rx_credits += toep->sb_cc - sb->sb_cc;
#ifdef USE_DDP_RX_FLOW_CONTROL
toep->rx_credits -= m->m_len; /* adjust for F_RX_FC_DDP */
sbappendstream_locked(sb, m);
toep->sb_cc = sb->sb_cc;
socantrcvmore_locked(so); /* unlocks the sockbuf */
KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
be32toh(cpl->rcv_nxt)));
switch (tp->t_state) {
case TCPS_SYN_RECEIVED:
tp->t_starttime = ticks;
case TCPS_ESTABLISHED:
tp->t_state = TCPS_CLOSE_WAIT;
case TCPS_FIN_WAIT_1:
tp->t_state = TCPS_CLOSING;
case TCPS_FIN_WAIT_2:
INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */
INP_INFO_WUNLOCK(&V_tcbinfo);
final_cpl_received(toep);
log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n",
__func__, tid, tp->t_state);
INP_INFO_WUNLOCK(&V_tcbinfo);

 * Peer has ACK'd our FIN.
do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
struct adapter *sc = iq->adapter;
const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1);
unsigned int tid = GET_TID(cpl);
struct toepcb *toep = lookup_tid(sc, tid);
struct inpcb *inp = toep->inp;
struct tcpcb *tp = NULL;
struct socket *so = NULL;
unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
KASSERT(opcode == CPL_CLOSE_CON_RPL,
("%s: unexpected opcode 0x%x", __func__, opcode));
KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
INP_INFO_WLOCK(&V_tcbinfo);
CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x",
__func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags);
if (toep->flags & TPF_ABORT_SHUTDOWN)
so = inp->inp_socket;
tp->snd_una = be32toh(cpl->snd_nxt) - 1; /* exclude FIN */
switch (tp->t_state) {
case TCPS_CLOSING: /* see TCPS_FIN_WAIT_2 in do_peer_close too */
INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */
INP_INFO_WUNLOCK(&V_tcbinfo);
final_cpl_received(toep); /* no more CPLs expected */
case TCPS_FIN_WAIT_1:
if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
soisdisconnected(so);
tp->t_state = TCPS_FIN_WAIT_2;
"%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n",
__func__, tid, tcpstates[tp->t_state]);
INP_INFO_WUNLOCK(&V_tcbinfo);

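/*
 * Reply to a CPL_ABORT_REQ_RSS from the chip with a CPL_ABORT_RPL carrying
 * the given rst_status (e.g. CPL_ABORT_NO_RST).
 */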
send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
struct cpl_abort_rpl *cpl;
wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
panic("%s: allocation failure.", __func__);
INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid);
cpl->cmd = rst_status;

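/* Translate a CPL abort reason to an errno suitable for the socket. */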
abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason)
switch (abort_reason) {
case CPL_ERR_BAD_SYN:
case CPL_ERR_CONN_RESET:
return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET);
case CPL_ERR_XMIT_TIMEDOUT:
case CPL_ERR_PERSIST_TIMEDOUT:
case CPL_ERR_FINWAIT2_TIMEDOUT:
case CPL_ERR_KEEPALIVE_TIMEDOUT:

 * TCP RST from the peer, timeout, or some other such critical error.
do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
struct adapter *sc = iq->adapter;
const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
unsigned int tid = GET_TID(cpl);
struct toepcb *toep = lookup_tid(sc, tid);
struct sge_wrq *ofld_txq = toep->ofld_txq;
unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
KASSERT(opcode == CPL_ABORT_REQ_RSS,
("%s: unexpected opcode 0x%x", __func__, opcode));
KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
if (toep->flags & TPF_SYNQE)
return (do_abort_req_synqe(iq, rss, m));
KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
if (negative_advice(cpl->status)) {
CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)",
__func__, cpl->status, tid, toep->flags);
return (0); /* Ignore negative advice */
INP_INFO_WLOCK(&V_tcbinfo); /* for tcp_close */
tp = intotcpcb(inp);
"%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d",
__func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags,
inp->inp_flags, cpl->status);
 * If we'd initiated an abort earlier the reply to it is responsible for
 * cleaning up resources. Otherwise we tear everything down right here
 * right now. We owe the T4 a CPL_ABORT_RPL no matter what.
if (toep->flags & TPF_ABORT_SHUTDOWN) {
toep->flags |= TPF_ABORT_SHUTDOWN;
if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
struct socket *so = inp->inp_socket;
so_error_set(so, abort_status_to_errno(tp,
INP_WLOCK(inp); /* re-acquire */
final_cpl_received(toep);
INP_INFO_WUNLOCK(&V_tcbinfo);
send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);

 * Reply to the CPL_ABORT_REQ (send_reset)
do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
struct adapter *sc = iq->adapter;
const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
unsigned int tid = GET_TID(cpl);
struct toepcb *toep = lookup_tid(sc, tid);
struct inpcb *inp = toep->inp;
unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
KASSERT(opcode == CPL_ABORT_RPL_RSS,
("%s: unexpected opcode 0x%x", __func__, opcode));
KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
if (toep->flags & TPF_SYNQE)
return (do_abort_rpl_synqe(iq, rss, m));
KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d",
__func__, tid, toep, inp, cpl->status);
KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
("%s: wasn't expecting abort reply", __func__));
final_cpl_received(toep);

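/*
 * CPL_RX_DATA: new payload received from the peer.  Append it to the socket
 * buffer, autosizing the buffer and updating DDP state as needed.
 */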
do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
struct adapter *sc = iq->adapter;
const struct cpl_rx_data *cpl = mtod(m, const void *);
unsigned int tid = GET_TID(cpl);
struct toepcb *toep = lookup_tid(sc, tid);
struct inpcb *inp = toep->inp;
uint32_t ddp_placed = 0;
if (__predict_false(toep->flags & TPF_SYNQE)) {
struct synq_entry *synqe = (void *)toep;
INP_WLOCK(synqe->lctx->inp);
if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
("%s: listen socket closed but tid %u not aborted.",
 * do_pass_accept_req is still running and will
 * eventually take care of this tid.
INP_WUNLOCK(synqe->lctx->inp);
CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
/* strip off CPL header */
m_adj(m, sizeof(*cpl));
len = m->m_pkthdr.len;
if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
__func__, tid, len, inp->inp_flags);
tp = intotcpcb(inp);
if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq)))
ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt;
KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
tp->t_rcvtime = ticks;
so = inp_inpcbtosocket(inp);
if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
__func__, tid, len);
INP_INFO_WLOCK(&V_tcbinfo);
tp = tcp_drop(tp, ECONNRESET);
INP_INFO_WUNLOCK(&V_tcbinfo);
/* receive buffer autosize */
if (sb->sb_flags & SB_AUTOSIZE &&
V_tcp_do_autorcvbuf &&
sb->sb_hiwat < V_tcp_autorcvbuf_max &&
len > (sbspace(sb) / 8 * 7)) {
unsigned int hiwat = sb->sb_hiwat;
unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
V_tcp_autorcvbuf_max);
if (!sbreserve_locked(sb, newsize, so, NULL))
sb->sb_flags &= ~SB_AUTOSIZE;
toep->rx_credits += newsize - hiwat;
if (toep->ulp_mode == ULP_MODE_TCPDDP) {
int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off;
if (toep->ddp_flags & DDP_SC_REQ)
toep->ddp_flags ^= DDP_ON | DDP_SC_REQ;
KASSERT(cpl->ddp_off == 1,
("%s: DDP switched on by itself.",
/* Fell out of DDP mode */
toep->ddp_flags &= ~(DDP_ON | DDP_BUF0_ACTIVE |
insert_ddp_data(toep, ddp_placed);
if ((toep->ddp_flags & DDP_OK) == 0 &&
time_uptime >= toep->ddp_disabled + DDP_RETRY_WAIT) {
toep->ddp_score = DDP_LOW_SCORE;
toep->ddp_flags |= DDP_OK;
CTR3(KTR_CXGBE, "%s: tid %u DDP_OK @ %u",
__func__, tid, time_uptime);
if (toep->ddp_flags & DDP_ON) {
 * CPL_RX_DATA with DDP on can only be an indicate. Ask
 * soreceive to post a buffer or disable DDP. The
 * payload that arrived in this indicate is appended to
 * the socket buffer as usual.
"%s: tid %u (0x%x) DDP indicate (seq 0x%x, len %d)",
__func__, tid, toep->flags, be32toh(cpl->seq), len);
sb->sb_flags |= SB_DDP_INDICATE;
} else if ((toep->ddp_flags & (DDP_OK|DDP_SC_REQ)) == DDP_OK &&
tp->rcv_wnd > DDP_RSVD_WIN && len >= sc->tt.ddp_thres) {
 * DDP allowed but isn't on (and a request to switch it
 * on isn't pending either), and conditions are ripe for
 * it to work. Switch it on.
enable_ddp(sc, toep);
KASSERT(toep->sb_cc >= sb->sb_cc,
("%s: sb %p has more data (%d) than last time (%d).",
__func__, sb, sb->sb_cc, toep->sb_cc));
toep->rx_credits += toep->sb_cc - sb->sb_cc;
sbappendstream_locked(sb, m);
toep->sb_cc = sb->sb_cc;
sorwakeup_locked(so);
SOCKBUF_UNLOCK_ASSERT(sb);

#define S_CPL_FW4_ACK_OPCODE 24
#define M_CPL_FW4_ACK_OPCODE 0xff
#define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE)
#define G_CPL_FW4_ACK_OPCODE(x) \
(((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE)

#define S_CPL_FW4_ACK_FLOWID 0
#define M_CPL_FW4_ACK_FLOWID 0xffffff
#define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID)
#define G_CPL_FW4_ACK_FLOWID(x) \
(((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID)

#define S_CPL_FW4_ACK_CR 24
#define M_CPL_FW4_ACK_CR 0xff
#define V_CPL_FW4_ACK_CR(x) ((x) << S_CPL_FW4_ACK_CR)
#define G_CPL_FW4_ACK_CR(x) (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR)

#define S_CPL_FW4_ACK_SEQVAL 0
#define M_CPL_FW4_ACK_SEQVAL 0x1
#define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL)
#define G_CPL_FW4_ACK_SEQVAL(x) \
(((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL)
#define F_CPL_FW4_ACK_SEQVAL V_CPL_FW4_ACK_SEQVAL(1U)

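/*
 * CPL_FW4_ACK: the firmware has consumed one or more of our tx work requests.
 * Reclaim the tx credits, drop the acknowledged bytes from so_snd, and resume
 * transmission if it was suspended for lack of credits.
 */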
do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
struct adapter *sc = iq->adapter;
const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
struct toepcb *toep = lookup_tid(sc, tid);
uint8_t credits = cpl->credits;
struct ofld_tx_sdesc *txsd;
unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl)));
 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and
 * now this comes back carrying the credits for the flowc.
if (__predict_false(toep->flags & TPF_SYNQE)) {
KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
("%s: credits for a synq entry %p", __func__, toep));
KASSERT(opcode == CPL_FW4_ACK,
("%s: unexpected opcode 0x%x", __func__, opcode));
KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) {
KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0,
("%s: inp_flags 0x%x", __func__, inp->inp_flags));
tp = intotcpcb(inp);
if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
tcp_seq snd_una = be32toh(cpl->snd_una);
if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) {
"%s: unexpected seq# %x for TID %u, snd_una %x\n",
__func__, snd_una, toep->tid, tp->snd_una);
if (tp->snd_una != snd_una) {
tp->snd_una = snd_una;
tp->ts_recent_age = tcp_ts_getticks();
so = inp->inp_socket;
txsd = &toep->txsd[toep->txsd_cidx];
KASSERT(credits >= txsd->tx_credits,
("%s: too many (or partial) credits", __func__));
credits -= txsd->tx_credits;
toep->tx_credits += txsd->tx_credits;
KASSERT(toep->txsd_avail <= toep->txsd_total,
("%s: txsd avail > total", __func__));
if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
txsd = &toep->txsd[0];
toep->txsd_cidx = 0;
struct sockbuf *sb = &so->so_snd;
sbdrop_locked(sb, plen);
sowwakeup_locked(so);
SOCKBUF_UNLOCK_ASSERT(sb);
if ((toep->flags & TPF_TX_SUSPENDED &&
toep->tx_credits >= MIN_OFLD_TX_CREDITS) ||
toep->tx_credits == toep->txsd_total *
howmany((sizeof(struct fw_ofld_tx_data_wr) + 1), 16)) {
toep->flags &= ~TPF_TX_SUSPENDED;
t4_push_frames(sc, toep);

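/*
 * CPL_SET_TCB_RPL: replies for TIDs in the filter range are handed off to
 * t4_filter_rpl; anything else is unexpected for now.
 */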
do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
struct adapter *sc = iq->adapter;
const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
unsigned int tid = GET_TID(cpl);
unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
KASSERT(opcode == CPL_SET_TCB_RPL,
("%s: unexpected opcode 0x%x", __func__, opcode));
KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
if (tid >= sc->tids.ftid_base &&
tid < sc->tids.ftid_base + sc->tids.nftids)
return (t4_filter_rpl(iq, rss, m)); /* TCB is a filter */
CXGBE_UNIMPLEMENTED(__func__);

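/*
 * Update a field in a connection's hardware TCB with a CPL_SET_TCB_FIELD.
 * The request goes out on the control queue if ctrl is set, otherwise on the
 * connection's offload tx queue, and no reply is requested.
 */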
t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, int ctrl,
uint16_t word, uint64_t mask, uint64_t val)
struct cpl_set_tcb_field *req;
wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq);
panic("%s: allocation failure.", __func__);
INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
req->reply_ctrl = htobe16(V_NO_REPLY(1) |
V_QUEUENO(toep->ofld_rxq->iq.abs_id));
req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
req->mask = htobe64(mask);
req->val = htobe64(val);

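/* Register the CPL handlers implemented in this file. */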
t4_init_cpl_io_handlers(struct adapter *sc)
t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close);
t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl);
t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req);
t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl);
t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data);
t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack);
t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl);

t4_uninit_cpl_io_handlers(struct adapter *sc)
t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);