/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/scope6_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <netinet/cc/cc.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_clip.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"
#include "tom/t4_tls.h"
static struct protosw toe_protosw;
static struct pr_usrreqs toe_usrreqs;

static struct protosw toe6_protosw;
static struct pr_usrreqs toe6_usrreqs;

/* Module ops */
static int t4_tom_mod_load(void);
static int t4_tom_mod_unload(void);
static int t4_tom_modevent(module_t, int, void *);

/* ULD ops and helpers */
static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);

static struct uld_info tom_uld_info = {
	.uld_id = ULD_TOM,
	.activate = t4_tom_activate,
	.deactivate = t4_tom_deactivate,
};

static void release_offload_resources(struct toepcb *);
static int alloc_tid_tabs(struct tid_info *);
static void free_tid_tabs(struct tid_info *);
static void free_tom_data(struct adapter *, struct tom_data *);
static void reclaim_wr_resources(void *, int);

struct toepcb *
alloc_toepcb(struct vi_info *vi, int flags)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct toepcb *toep;
	int tx_credits, txsd_total, len;

	/*
	 * The firmware counts tx work request credits in units of 16 bytes
	 * each.  Reserve room for an ABORT_REQ so the driver never has to worry
	 * about tx credits if it wants to abort a connection.
	 */
	tx_credits = sc->params.ofldq_wr_cred;
	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);

	/*
	 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
	 * immediate payload, and firmware counts tx work request credits in
	 * units of 16 bytes.  Calculate the maximum work requests possible.
	 */
	txsd_total = tx_credits /
	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);

	len = offsetof(struct toepcb, txsd) +
	    txsd_total * sizeof(struct ofld_tx_sdesc);

	toep = malloc(len, M_CXGBE, M_ZERO | flags);
	if (toep == NULL)
		return (NULL);

	refcount_init(&toep->refcount, 1);
	toep->td = sc->tom_softc;
	toep->vi = vi;
	toep->tid = -1;
	toep->tx_total = tx_credits;
	toep->tx_credits = tx_credits;
	mbufq_init(&toep->ulp_pduq, INT_MAX);
	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
	toep->txsd_total = txsd_total;
	toep->txsd_avail = txsd_total;
	toep->txsd_pidx = 0;
	toep->txsd_cidx = 0;
	aiotx_init_toep(toep);

	return (toep);
}
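
/*
 * Worked example of the sizing above (all numbers are illustrative, not
 * taken from the firmware or headers): with ofldq_wr_cred = 512 and a
 * cpl_abort_req that fits in one 16-byte credit, tx_credits = 511.  If
 * the smallest fw_ofld_tx_data_wr plus 1 byte of immediate payload were
 * to round up to 3 credits, txsd_total = 511 / 3 = 170, and the toepcb
 * is allocated with room for 170 entries in its trailing txsd[] array.
 */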

/*
 * Initialize a toepcb after its params have been filled out.
 */
int
init_toepcb(struct vi_info *vi, struct toepcb *toep)
{
	struct conn_params *cp = &toep->params;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct tx_cl_rl_params *tc;

	if (cp->tc_idx >= 0 && cp->tc_idx < sc->chip_params->nsched_cls) {
		tc = &pi->sched_params->cl_rl[cp->tc_idx];
		mtx_lock(&sc->tc_lock);
		if (tc->flags & CLRL_ERR) {
			log(LOG_ERR,
			    "%s: failed to associate traffic class %u with tid %u\n",
			    device_get_nameunit(vi->dev), cp->tc_idx,
			    toep->tid);
			cp->tc_idx = -1;
		} else
			tc->refcount++;
		mtx_unlock(&sc->tc_lock);
	}
	toep->ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
	toep->ofld_rxq = &sc->sge.ofld_rxq[cp->rxq_idx];
	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];

	tls_init_toep(toep);
	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
		ddp_init_toep(toep);

	toep->flags |= TPF_INITIALIZED;

	return (0);
}

struct toepcb *
hold_toepcb(struct toepcb *toep)
{

	refcount_acquire(&toep->refcount);
	return (toep);
}

void
free_toepcb(struct toepcb *toep)
{

	if (refcount_release(&toep->refcount) == 0)
		return;

	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: attached to an inpcb", __func__));
	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: CPL pending", __func__));

	if (toep->flags & TPF_INITIALIZED) {
		if (ulp_mode(toep) == ULP_MODE_TCPDDP)
			ddp_uninit_toep(toep);
		tls_uninit_toep(toep);
	}
	free(toep, M_CXGBE);
}

/*
 * Set up the socket for TCP offload.
 */
void
offload_socket(struct socket *so, struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	/* Update socket */
	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	if (inp->inp_vflag & INP_IPV6)
		so->so_proto = &toe6_protosw;
	else
		so->so_proto = &toe_protosw;
	SOCKBUF_UNLOCK(sb);

	/* Update TCP PCB */
	tp->tod = &td->tod;
	tp->t_toe = toep;
	tp->t_flags |= TF_TOE;

	/* Install an extra hold on inp */
	toep->inp = inp;
	toep->flags |= TPF_ATTACHED;
	in_pcbref(inp);

	/* Add the TOE PCB to the active list */
	mtx_lock(&td->toep_list_lock);
	TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

/* This is _not_ the normal way to "unoffload" a socket. */
void
undo_offload_socket(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;
	struct tom_data *td = toep->td;
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;

	toep->inp = NULL;
	toep->flags &= ~TPF_ATTACHED;
	if (in_pcbrele_wlocked(inp))
		panic("%s: inp freed.", __func__);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

static void
release_offload_resources(struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct adapter *sc = td_adapter(td);
	int tid = toep->tid;

	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: %p has CPL pending.", __func__, toep));
	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: %p is still attached.", __func__, toep));

	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
	    __func__, toep, tid, toep->l2te, toep->ce);

	/*
	 * These queues should have been emptied at approximately the same time
	 * that a normal connection's socket's so_snd would have been purged or
	 * drained.  Do _not_ clean up here.
	 */
	MPASS(mbufq_len(&toep->ulp_pduq) == 0);
	MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0);
#ifdef INVARIANTS
	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
		ddp_assert_empty(toep);
#endif
	MPASS(TAILQ_EMPTY(&toep->aiotx_jobq));

	if (toep->l2te)
		t4_l2t_release(toep->l2te);

	if (tid >= 0) {
		remove_tid(sc, tid, toep->ce ? 2 : 1);
		release_tid(sc, tid, toep->ctrlq);
	}

	if (toep->ce)
		t4_release_lip(sc, toep->ce);

	if (toep->params.tc_idx != -1)
		t4_release_cl_rl(sc, toep->vi->pi->port_id, toep->params.tc_idx);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	free_toepcb(toep);
}

/*
 * The kernel is done with the TCP PCB and this is our opportunity to unhook the
 * toepcb hanging off of it.  If the TOE driver is also done with the toepcb (no
 * pending CPL) then it is time to release all resources tied to the toepcb.
 *
 * Also gets called when an offloaded active open fails and the TOM wants the
 * kernel to take the TCP PCB back.
 */
static void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);

	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
	KASSERT(toep->flags & TPF_ATTACHED,
	    ("%s: not attached", __func__));

#ifdef KTR
	if (tp->t_state == TCPS_SYN_SENT) {
		CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
		    __func__, toep->tid, toep, toep->flags, inp,
		    inp->inp_flags);
	} else {
		CTR6(KTR_CXGBE,
		    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
		    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
		    inp->inp_flags);
	}
#endif

	if (ulp_mode(toep) == ULP_MODE_TLS)
		tls_detach(toep);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;
	toep->flags &= ~TPF_ATTACHED;

	if (!(toep->flags & TPF_CPL_PENDING))
		release_offload_resources(toep);
}

/*
 * setsockopt handler.
 */
static void
t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	if (dir == SOPT_GET)
		return;

	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);

	switch (name) {
	case TCP_NODELAY:
		if (tp->t_state != TCPS_ESTABLISHED)
			break;
		toep->params.nagle = tp->t_flags & TF_NODELAY ? 0 : 1;
		t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
		    V_TF_NAGLE(1), V_TF_NAGLE(toep->params.nagle), 0, 0);
		break;
	default:
		break;
	}
}

static inline uint64_t
get_tcb_tflags(const uint64_t *tcb)
{

	return ((be64toh(tcb[14]) << 32) | (be64toh(tcb[15]) >> 32));
}

static inline uint32_t
get_tcb_field(const uint64_t *tcb, u_int word, uint32_t mask, u_int shift)
{
#define LAST_WORD ((TCB_SIZE / 4) - 1)
	uint64_t t1, t2;
	int flit_idx;

	MPASS(mask != 0);
	MPASS(word <= LAST_WORD);
	MPASS(shift < 32);

	flit_idx = (LAST_WORD - word) / 2;
	if (word & 0x1)
		shift += 32;
	t1 = be64toh(tcb[flit_idx]) >> shift;
	t2 = 0;
	if (fls(mask) > 64 - shift) {
		/*
		 * Will spill over into the next logical flit, which is the flit
		 * before this one.  The flit_idx before this one must be valid.
		 */
		MPASS(flit_idx > 0);
		t2 = be64toh(tcb[flit_idx - 1]) << (64 - shift);
	}
	return ((t2 | t1) & mask);
#undef LAST_WORD
}
#define GET_TCB_FIELD(tcb, F) \
    get_tcb_field(tcb, W_TCB_##F, M_TCB_##F, S_TCB_##F)
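
/*
 * Usage example: GET_TCB_FIELD(tcb, T_SRTT) expands to
 * get_tcb_field(tcb, W_TCB_T_SRTT, M_TCB_T_SRTT, S_TCB_T_SRTT), with the
 * word/mask/shift triple coming from t4_tcb.h.  get_tcb_field() maps the
 * 32-bit TCB word to the 64-bit flit that holds it (the flits of the TCB
 * image are in reverse order) and extracts the field with the mask.
 */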

/*
 * Issues a CPL_GET_TCB to read the entire TCB for the tid.
 */
static int
send_get_tcb(struct adapter *sc, u_int tid)
{
	struct cpl_get_tcb *cpl;
	struct wrq_cookie cookie;

	MPASS(tid < sc->tids.ntids);

	cpl = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*cpl), 16),
	    &cookie);
	if (__predict_false(cpl == NULL))
		return (ENOMEM);
	bzero(cpl, sizeof(*cpl));
	INIT_TP_WR(cpl, tid);
	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid));
	cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
	    V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id));
	cpl->cookie = 0xff;
	commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);

	return (0);
}
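
/*
 * The CPL_GET_TCB_RPL generated in response to the request above is
 * steered to ofld_rxq[0] (see V_QUEUENO) and is processed by
 * do_get_tcb_rpl(), which feeds the TCB history maintained below.
 */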

static struct tcb_histent *
alloc_tcb_histent(struct adapter *sc, u_int tid, int flags)
{
	struct tcb_histent *te;

	MPASS(flags == M_NOWAIT || flags == M_WAITOK);

	te = malloc(sizeof(*te), M_CXGBE, M_ZERO | flags);
	if (te == NULL)
		return (NULL);
	mtx_init(&te->te_lock, "TCB entry", NULL, MTX_DEF);
	callout_init_mtx(&te->te_callout, &te->te_lock, 0);
	te->te_adapter = sc;
	te->te_tid = tid;

	return (te);
}

static void
free_tcb_histent(struct tcb_histent *te)
{

	mtx_destroy(&te->te_lock);
	free(te, M_CXGBE);
}

/*
 * Start tracking the tid in the TCB history.
 */
int
add_tid_to_history(struct adapter *sc, u_int tid)
{
	struct tcb_histent *te = NULL;
	struct tom_data *td = sc->tom_softc;
	int rc;

	MPASS(tid < sc->tids.ntids);

	if (td->tcb_history == NULL)
		return (ENXIO);

	rw_wlock(&td->tcb_history_lock);
	if (td->tcb_history[tid] != NULL) {
		rc = EEXIST;
		goto done;
	}
	te = alloc_tcb_histent(sc, tid, M_NOWAIT);
	if (te == NULL) {
		rc = ENOMEM;
		goto done;
	}
	mtx_lock(&te->te_lock);
	rc = send_get_tcb(sc, tid);
	if (rc == 0) {
		te->te_flags |= TE_RPL_PENDING;
		td->tcb_history[tid] = te;
	}
	mtx_unlock(&te->te_lock);
	if (rc != 0)
		free_tcb_histent(te);	/* GET_TCB never went out; discard. */
done:
	rw_wunlock(&td->tcb_history_lock);

	return (rc);
}

static void
remove_tcb_histent(struct tcb_histent *te)
{
	struct adapter *sc = te->te_adapter;
	struct tom_data *td = sc->tom_softc;

	rw_assert(&td->tcb_history_lock, RA_WLOCKED);
	mtx_assert(&te->te_lock, MA_OWNED);
	MPASS(td->tcb_history[te->te_tid] == te);

	td->tcb_history[te->te_tid] = NULL;
	free_tcb_histent(te);
	rw_wunlock(&td->tcb_history_lock);
}

static inline struct tcb_histent *
lookup_tcb_histent(struct adapter *sc, u_int tid, bool addrem)
{
	struct tcb_histent *te;
	struct tom_data *td = sc->tom_softc;

	MPASS(tid < sc->tids.ntids);

	if (td->tcb_history == NULL)
		return (NULL);

	if (addrem)
		rw_wlock(&td->tcb_history_lock);
	else
		rw_rlock(&td->tcb_history_lock);
	te = td->tcb_history[tid];
	if (te != NULL) {
		mtx_lock(&te->te_lock);
		return (te);	/* with both locks held */
	}
	if (addrem)
		rw_wunlock(&td->tcb_history_lock);
	else
		rw_runlock(&td->tcb_history_lock);

	return (te);
}

static void
release_tcb_histent(struct tcb_histent *te)
{
	struct adapter *sc = te->te_adapter;
	struct tom_data *td = sc->tom_softc;

	mtx_assert(&te->te_lock, MA_OWNED);
	mtx_unlock(&te->te_lock);
	rw_assert(&td->tcb_history_lock, RA_RLOCKED);
	rw_runlock(&td->tcb_history_lock);
}

static void
request_tcb(void *arg)
{
	struct tcb_histent *te = arg;

	mtx_assert(&te->te_lock, MA_OWNED);

	/* No one else is supposed to update the histent. */
	MPASS(!(te->te_flags & TE_RPL_PENDING));
	if (send_get_tcb(te->te_adapter, te->te_tid) == 0)
		te->te_flags |= TE_RPL_PENDING;
	else
		callout_schedule(&te->te_callout, hz / 100);
}

static void
update_tcb_histent(struct tcb_histent *te, const uint64_t *tcb)
{
	struct tom_data *td = te->te_adapter->tom_softc;
	uint64_t tflags = get_tcb_tflags(tcb);
	uint8_t sample = 0;

	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != GET_TCB_FIELD(tcb, SND_UNA_RAW)) {
		if (GET_TCB_FIELD(tcb, T_RXTSHIFT) != 0)
			sample |= TS_RTO;
		if (GET_TCB_FIELD(tcb, T_DUPACKS) != 0)
			sample |= TS_DUPACKS;
		if (GET_TCB_FIELD(tcb, T_DUPACKS) >= td->dupack_threshold)
			sample |= TS_FASTREXMT;
	}

	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != 0) {
		uint32_t snd_wnd;

		sample |= TS_SND_BACKLOGGED;	/* for whatever reason. */

		snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
		if (tflags & V_TF_RECV_SCALE(1))
			snd_wnd <<= GET_TCB_FIELD(tcb, RCV_SCALE);
		if (GET_TCB_FIELD(tcb, SND_CWND) < snd_wnd)
			sample |= TS_CWND_LIMITED;	/* maybe due to CWND */
	}

	if (tflags & V_TF_CCTRL_ECN(1)) {

		/*
		 * CE marker on incoming IP hdr, echoing ECE back in the TCP
		 * hdr.  Indicates congestion somewhere on the way from the peer
		 * to this node.
		 */
		if (tflags & V_TF_CCTRL_ECE(1))
			sample |= TS_ECN_ECE;

		/*
		 * ECE seen and CWR sent (or about to be sent).  Might indicate
		 * congestion on the way to the peer.  This node is reducing its
		 * congestion window in response.
		 */
		if (tflags & (V_TF_CCTRL_CWR(1) | V_TF_CCTRL_RFR(1)))
			sample |= TS_ECN_CWR;
	}

	te->te_sample[te->te_pidx] = sample;
	if (++te->te_pidx == nitems(te->te_sample))
		te->te_pidx = 0;
	memcpy(te->te_tcb, tcb, TCB_SIZE);
	te->te_flags |= TE_ACTIVE;
}

static int
do_get_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_get_tcb_rpl *cpl = mtod(m, const void *);
	const uint64_t *tcb = (const uint64_t *)(const void *)(cpl + 1);
	struct tcb_histent *te;
	const u_int tid = GET_TID(cpl);
	bool remove;

	remove = GET_TCB_FIELD(tcb, T_STATE) == TCPS_CLOSED;
	te = lookup_tcb_histent(sc, tid, remove);
	if (te == NULL) {
		/* Not in the history.  Who issued the GET_TCB for this? */
		device_printf(sc->dev, "tcb %u: flags 0x%016jx, state %u, "
		    "srtt %u, sscale %u, rscale %u, cookie 0x%x\n", tid,
		    (uintmax_t)get_tcb_tflags(tcb), GET_TCB_FIELD(tcb, T_STATE),
		    GET_TCB_FIELD(tcb, T_SRTT), GET_TCB_FIELD(tcb, SND_SCALE),
		    GET_TCB_FIELD(tcb, RCV_SCALE), cpl->cookie);
		goto done;
	}

	MPASS(te->te_flags & TE_RPL_PENDING);
	te->te_flags &= ~TE_RPL_PENDING;
	if (remove) {
		remove_tcb_histent(te);
	} else {
		update_tcb_histent(te, tcb);
		callout_reset(&te->te_callout, hz / 10, request_tcb, te);
		release_tcb_histent(te);
	}
done:
	m_freem(m);
	return (0);
}

static void
fill_tcp_info_from_tcb(struct adapter *sc, uint64_t *tcb, struct tcp_info *ti)
{
	uint32_t v;

	ti->tcpi_state = GET_TCB_FIELD(tcb, T_STATE);

	v = GET_TCB_FIELD(tcb, T_SRTT);
	ti->tcpi_rtt = tcp_ticks_to_us(sc, v);

	v = GET_TCB_FIELD(tcb, T_RTTVAR);
	ti->tcpi_rttvar = tcp_ticks_to_us(sc, v);

	ti->tcpi_snd_ssthresh = GET_TCB_FIELD(tcb, SND_SSTHRESH);
	ti->tcpi_snd_cwnd = GET_TCB_FIELD(tcb, SND_CWND);
	ti->tcpi_rcv_nxt = GET_TCB_FIELD(tcb, RCV_NXT);

	v = GET_TCB_FIELD(tcb, TX_MAX);
	ti->tcpi_snd_nxt = v - GET_TCB_FIELD(tcb, SND_NXT_RAW);

	/* Receive window being advertised by us. */
	ti->tcpi_rcv_wscale = GET_TCB_FIELD(tcb, SND_SCALE);	/* Yes, SND. */
	ti->tcpi_rcv_space = GET_TCB_FIELD(tcb, RCV_WND);

	/* Send window */
	ti->tcpi_snd_wscale = GET_TCB_FIELD(tcb, RCV_SCALE);	/* Yes, RCV. */
	ti->tcpi_snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
	if (get_tcb_tflags(tcb) & V_TF_RECV_SCALE(1))
		ti->tcpi_snd_wnd <<= ti->tcpi_snd_wscale;
	else
		ti->tcpi_snd_wscale = 0;
}
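
/*
 * Note on the _RAW fields read above: the hardware tracks parts of the
 * send state as offsets rather than absolute sequence numbers, which is
 * why tcpi_snd_nxt is reconstructed as TX_MAX - SND_NXT_RAW instead of
 * being read out directly.
 */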

static void
fill_tcp_info_from_history(struct adapter *sc, struct tcb_histent *te,
    struct tcp_info *ti)
{

	fill_tcp_info_from_tcb(sc, te->te_tcb, ti);
}

/*
 * Reads the TCB for the given tid using a memory window and copies it to 'buf'
 * in the same format as CPL_GET_TCB_RPL.
 */
static void
read_tcb_using_memwin(struct adapter *sc, u_int tid, uint64_t *buf)
{
	int i, j, k, rc;
	uint32_t addr;
	u_char *tcb, tmp;

	MPASS(tid < sc->tids.ntids);

	addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
	rc = read_via_memwin(sc, 2, addr, (uint32_t *)buf, TCB_SIZE);
	if (rc != 0)
		return;

	/* Flip the TCB image 16 bytes at a time to match the RPL layout. */
	tcb = (u_char *)buf;
	for (i = 0, j = TCB_SIZE - 16; i < j; i += 16, j -= 16) {
		for (k = 0; k < 16; k++) {
			tmp = tcb[i + k];
			tcb[i + k] = tcb[j + k];
			tcb[j + k] = tmp;
		}
	}
}

static void
fill_tcp_info(struct adapter *sc, u_int tid, struct tcp_info *ti)
{
	uint64_t tcb[TCB_SIZE / sizeof(uint64_t)];
	struct tcb_histent *te;

	ti->tcpi_toe_tid = tid;
	te = lookup_tcb_histent(sc, tid, false);
	if (te != NULL) {
		fill_tcp_info_from_history(sc, te, ti);
		release_tcb_histent(te);
	} else {
		if (!(sc->debug_flags & DF_DISABLE_TCB_CACHE)) {
			/* XXX: tell firmware to flush TCB cache. */
		}
		read_tcb_using_memwin(sc, tid, tcb);
		fill_tcp_info_from_tcb(sc, tcb, ti);
	}
}

/*
 * Called by the kernel to allow the TOE driver to "refine" values filled in
 * the tcp_info for an offloaded connection.
 */
static void
t4_tcp_info(struct toedev *tod, struct tcpcb *tp, struct tcp_info *ti)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	MPASS(ti != NULL);

	fill_tcp_info(sc, toep->tid, ti);
}

#ifdef KERN_TLS
static int
t4_alloc_tls_session(struct toedev *tod, struct tcpcb *tp,
    struct ktls_session *tls, int direction)
{
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	MPASS(tls != NULL);

	return (tls_alloc_ktls(toep, tls, direction));
}
#endif

/*
 * The TOE driver will not receive any more CPLs for the tid associated with the
 * toepcb; release the hold on the inpcb.
 */
void
final_cpl_received(struct toepcb *toep)
{
	struct inpcb *inp = toep->inp;

	KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_CPL_PENDING,
	    ("%s: CPL not pending already?", __func__));

	CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
	    __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);

	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
		release_ddp_resources(toep);
	else if (ulp_mode(toep) == ULP_MODE_TLS)
		tls_detach(toep);
	toep->inp = NULL;
	toep->flags &= ~TPF_CPL_PENDING;
	mbufq_drain(&toep->ulp_pdu_reclaimq);

	if (!(toep->flags & TPF_ATTACHED))
		release_offload_resources(toep);

	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);
}

void
insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
{
	struct tid_info *t = &sc->tids;

	MPASS(tid >= t->tid_base);
	MPASS(tid - t->tid_base < t->ntids);

	t->tid_tab[tid - t->tid_base] = ctx;
	atomic_add_int(&t->tids_in_use, ntids);
}

void *
lookup_tid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->tid_tab[tid - t->tid_base]);
}

void
update_tid(struct adapter *sc, int tid, void *ctx)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = ctx;
}

void
remove_tid(struct adapter *sc, int tid, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = NULL;
	atomic_subtract_int(&t->tids_in_use, ntids);
}

/*
 * What mtu_idx to use, given a 4-tuple.  Note that both s->mss and tcp_mssopt
 * have the MSS that we should advertise in our SYN.  Advertised MSS doesn't
 * account for any TCP options so the effective MSS (only payload, no headers or
 * options) could be different.
 */
static int
find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc,
    struct offload_settings *s)
{
	unsigned short *mtus = &sc->params.mtus[0];
	int i, mss, mtu;

	MPASS(inc != NULL);

	mss = s->mss > 0 ? s->mss : tcp_mssopt(inc);
	if (inc->inc_flags & INC_ISIPV6)
		mtu = mss + sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		mtu = mss + sizeof(struct ip) + sizeof(struct tcphdr);

	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++)
		continue;

	return (i);
}
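
/*
 * Example: an IPv4 connection advertising an MSS of 1460 makes the loop
 * above search for mtu = 1460 + 20 + 20 = 1500, and the index of the
 * largest PMTU table entry that does not exceed 1500 is returned.
 */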

/*
 * Determine the receive window size for a socket.
 */
u_long
select_rcv_wnd(struct socket *so)
{
	unsigned long wnd;

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	wnd = sbspace(&so->so_rcv);
	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	return min(wnd, MAX_RCV_WND);
}

int
select_rcv_wscale(void)
{
	int wscale = 0;
	unsigned long space = sb_max;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;

	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
		wscale++;

	return (wscale);
}
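
/*
 * Illustrative run (the 2MB limit is a made-up value, not necessarily
 * MAX_RCV_WND): for space = 2MB the loop stops at wscale = 6, because
 * 65535 << 5 is still below 2MB while 65535 << 6 is not.
 */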

__be64
calc_options0(struct vi_info *vi, struct conn_params *cp)
{
	uint64_t opt0 = 0;

	opt0 |= F_TCAM_BYPASS;

	MPASS(cp->wscale >= 0 && cp->wscale <= M_WND_SCALE);
	opt0 |= V_WND_SCALE(cp->wscale);

	MPASS(cp->mtu_idx >= 0 && cp->mtu_idx < NMTUS);
	opt0 |= V_MSS_IDX(cp->mtu_idx);

	MPASS(cp->ulp_mode >= 0 && cp->ulp_mode <= M_ULP_MODE);
	opt0 |= V_ULP_MODE(cp->ulp_mode);

	MPASS(cp->opt0_bufsize >= 0 && cp->opt0_bufsize <= M_RCV_BUFSIZ);
	opt0 |= V_RCV_BUFSIZ(cp->opt0_bufsize);

	MPASS(cp->l2t_idx >= 0 && cp->l2t_idx < vi->adapter->vres.l2t.size);
	opt0 |= V_L2T_IDX(cp->l2t_idx);

	opt0 |= V_SMAC_SEL(vi->smt_idx);
	opt0 |= V_TX_CHAN(vi->pi->tx_chan);

	MPASS(cp->keepalive == 0 || cp->keepalive == 1);
	opt0 |= V_KEEP_ALIVE(cp->keepalive);

	MPASS(cp->nagle == 0 || cp->nagle == 1);
	opt0 |= V_NAGLE(cp->nagle);

	return (htobe64(opt0));
}

__be32
calc_options2(struct vi_info *vi, struct conn_params *cp)
{
	uint32_t opt2 = 0;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;

	/*
	 * rx flow control, rx coalesce, congestion control, and tx pace are all
	 * explicitly set by the driver.  On T5+ the ISS is also set by the
	 * driver to the value picked by the kernel.
	 */
	if (is_t4(sc)) {
		opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
		opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
	} else {
		opt2 |= F_T5_OPT_2_VALID;	/* all 4 valid */
		opt2 |= F_T5_ISS;		/* ISS provided in CPL */
	}

	MPASS(cp->sack == 0 || cp->sack == 1);
	opt2 |= V_SACK_EN(cp->sack);

	MPASS(cp->tstamp == 0 || cp->tstamp == 1);
	opt2 |= V_TSTAMPS_EN(cp->tstamp);

	if (cp->wscale > 0)
		opt2 |= F_WND_SCALE_EN;

	MPASS(cp->ecn == 0 || cp->ecn == 1);
	opt2 |= V_CCTRL_ECN(cp->ecn);

	/* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */

	opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);
	opt2 |= V_PACE(0);
	opt2 |= F_RSS_QUEUE_VALID;
	opt2 |= V_RSS_QUEUE(sc->sge.ofld_rxq[cp->rxq_idx].iq.abs_id);

	MPASS(cp->cong_algo >= 0 && cp->cong_algo <= M_CONG_CNTRL);
	opt2 |= V_CONG_CNTRL(cp->cong_algo);

	MPASS(cp->rx_coalesce == 0 || cp->rx_coalesce == 1);
	if (cp->rx_coalesce == 1)
		opt2 |= V_RX_COALESCE(M_RX_COALESCE);

	opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);
#ifdef USE_DDP_RX_FLOW_CONTROL
	if (cp->ulp_mode == ULP_MODE_TCPDDP)
		opt2 |= F_RX_FC_DDP;
#endif

	return (htobe32(opt2));
}

uint64_t
select_ntuple(struct vi_info *vi, struct l2t_entry *e)
{
	struct adapter *sc = vi->adapter;
	struct tp_params *tp = &sc->params.tp;
	uint64_t ntuple = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && EVL_VLANOFTAG(e->vlan) != CPL_L2T_VLAN_NONE)
		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (uint64_t)e->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0 && tp->ingress_config & F_VNIC) {
		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) |
		    V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) <<
		    tp->vnic_shift;
	}

	if (is_t4(sc))
		return (htobe32((uint32_t)ntuple));
	else
		return (htobe64(V_FILTER_TUPLE(ntuple)));
}
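
/*
 * The returned value is sized for the hardware generation: T4 uses a
 * 32-bit compressed filter tuple while T5+ carries it in a 64-bit
 * FILTER_TUPLE field, hence the htobe32/htobe64 split above.
 */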

static int
is_tls_sock(struct socket *so, struct adapter *sc)
{
	struct inpcb *inp = sotoinpcb(so);
	int i, rc;

	/* XXX: Eventually add a SO_WANT_TLS socket option perhaps? */
	rc = 0;
	ADAPTER_LOCK(sc);
	for (i = 0; i < sc->tt.num_tls_rx_ports; i++) {
		if (inp->inp_lport == htons(sc->tt.tls_rx_ports[i]) ||
		    inp->inp_fport == htons(sc->tt.tls_rx_ports[i])) {
			rc = 1;
			break;
		}
	}
	ADAPTER_UNLOCK(sc);
	return (rc);
}

/*
 * Initialize various connection parameters.
 */
void
init_conn_params(struct vi_info *vi, struct offload_settings *s,
    struct in_conninfo *inc, struct socket *so,
    const struct tcp_options *tcpopt, int16_t l2t_idx, struct conn_params *cp)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct tom_tunables *tt = &sc->tt;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	u_long wnd;

	MPASS(s->offload != 0);

	/* Congestion control algorithm */
	if (s->cong_algo >= 0)
		cp->cong_algo = s->cong_algo & M_CONG_CNTRL;
	else if (sc->tt.cong_algorithm >= 0)
		cp->cong_algo = tt->cong_algorithm & M_CONG_CNTRL;
	else {
		struct cc_algo *cc = CC_ALGO(tp);

		if (strcasecmp(cc->name, "reno") == 0)
			cp->cong_algo = CONG_ALG_RENO;
		else if (strcasecmp(cc->name, "tahoe") == 0)
			cp->cong_algo = CONG_ALG_TAHOE;
		else if (strcasecmp(cc->name, "newreno") == 0)
			cp->cong_algo = CONG_ALG_NEWRENO;
		else if (strcasecmp(cc->name, "highspeed") == 0)
			cp->cong_algo = CONG_ALG_HIGHSPEED;
		else {
			/*
			 * Use newreno in case the algorithm selected by the
			 * host stack is not supported by the hardware.
			 */
			cp->cong_algo = CONG_ALG_NEWRENO;
		}
	}

	/* Tx traffic scheduling class. */
	if (s->sched_class >= 0 &&
	    s->sched_class < sc->chip_params->nsched_cls)
		cp->tc_idx = s->sched_class;
	else
		cp->tc_idx = -1;

	/* Nagle's algorithm. */
	if (s->nagle >= 0)
		cp->nagle = s->nagle > 0 ? 1 : 0;
	else
		cp->nagle = tp->t_flags & TF_NODELAY ? 0 : 1;

	/* TCP Keepalive. */
	if (V_tcp_always_keepalive || so_options_get(so) & SO_KEEPALIVE)
		cp->keepalive = 1;
	else
		cp->keepalive = 0;

	/* Optimization that's specific to T5 @ 40G. */
	if (tt->tx_align >= 0)
		cp->tx_align = tt->tx_align > 0 ? 1 : 0;
	else if (chip_id(sc) == CHELSIO_T5 &&
	    (port_top_speed(pi) > 10 || sc->params.nports > 2))
		cp->tx_align = 1;
	else
		cp->tx_align = 0;

	/* ULP mode. */
	if (can_tls_offload(sc) &&
	    (s->tls > 0 || (s->tls < 0 && is_tls_sock(so, sc))))
		cp->ulp_mode = ULP_MODE_TLS;
	else if (s->ddp > 0 ||
	    (s->ddp < 0 && sc->tt.ddp && (so_options_get(so) & SO_NO_DDP) == 0))
		cp->ulp_mode = ULP_MODE_TCPDDP;
	else
		cp->ulp_mode = ULP_MODE_NONE;

	/* Rx coalescing. */
	if (s->rx_coalesce >= 0)
		cp->rx_coalesce = s->rx_coalesce > 0 ? 1 : 0;
	else if (cp->ulp_mode == ULP_MODE_TLS)
		cp->rx_coalesce = 0;
	else if (tt->rx_coalesce >= 0)
		cp->rx_coalesce = tt->rx_coalesce > 0 ? 1 : 0;
	else
		cp->rx_coalesce = 1;	/* default */

	/*
	 * Index in the PMTU table.  This controls the MSS that we announce in
	 * our SYN initially, but after ESTABLISHED it controls the MSS that we
	 * send.
	 */
	cp->mtu_idx = find_best_mtu_idx(sc, inc, s);

	/* Tx queue for this connection. */
	if (s->txq >= 0 && s->txq < vi->nofldtxq)
		cp->txq_idx = s->txq;
	else
		cp->txq_idx = arc4random() % vi->nofldtxq;
	cp->txq_idx += vi->first_ofld_txq;

	/* Rx queue for this connection. */
	if (s->rxq >= 0 && s->rxq < vi->nofldrxq)
		cp->rxq_idx = s->rxq;
	else
		cp->rxq_idx = arc4random() % vi->nofldrxq;
	cp->rxq_idx += vi->first_ofld_rxq;

	if (SOLISTENING(so)) {
		/* Passive open */
		MPASS(tcpopt != NULL);

		/* TCP timestamp option */
		if (tcpopt->tstamp &&
		    (s->tstamp > 0 || (s->tstamp < 0 && V_tcp_do_rfc1323)))
			cp->tstamp = 1;
		else
			cp->tstamp = 0;

		/* SACK */
		if (tcpopt->sack &&
		    (s->sack > 0 || (s->sack < 0 && V_tcp_do_sack)))
			cp->sack = 1;
		else
			cp->sack = 0;

		/* Receive window scaling. */
		if (tcpopt->wsf > 0 && tcpopt->wsf < 15 && V_tcp_do_rfc1323)
			cp->wscale = select_rcv_wscale();
		else
			cp->wscale = 0;

		/* ECN */
		if (tcpopt->ecn &&	/* XXX: review. */
		    (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn)))
			cp->ecn = 1;
		else
			cp->ecn = 0;

		wnd = max(so->sol_sbrcv_hiwat, MIN_RCV_WND);
		cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);

		if (tt->sndbuf > 0)
			cp->sndbuf = tt->sndbuf;
		else if (so->sol_sbsnd_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf)
			cp->sndbuf = 256 * 1024;
		else
			cp->sndbuf = so->sol_sbsnd_hiwat;
	} else {
		/* Active open */

		/* TCP timestamp option */
		if (s->tstamp > 0 ||
		    (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
			cp->tstamp = 1;
		else
			cp->tstamp = 0;

		/* SACK */
		if (s->sack > 0 ||
		    (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
			cp->sack = 1;
		else
			cp->sack = 0;

		/* Receive window scaling */
		if (tp->t_flags & TF_REQ_SCALE)
			cp->wscale = select_rcv_wscale();
		else
			cp->wscale = 0;

		/* ECN */
		if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
			cp->ecn = 1;
		else
			cp->ecn = 0;

		SOCKBUF_LOCK(&so->so_rcv);
		wnd = max(select_rcv_wnd(so), MIN_RCV_WND);
		SOCKBUF_UNLOCK(&so->so_rcv);
		cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);

		if (tt->sndbuf > 0)
			cp->sndbuf = tt->sndbuf;
		else {
			SOCKBUF_LOCK(&so->so_snd);
			if (so->so_snd.sb_flags & SB_AUTOSIZE &&
			    V_tcp_do_autosndbuf)
				cp->sndbuf = 256 * 1024;
			else
				cp->sndbuf = so->so_snd.sb_hiwat;
			SOCKBUF_UNLOCK(&so->so_snd);
		}
	}

	cp->l2t_idx = l2t_idx;

	/* This will be initialized on ESTABLISHED. */
	cp->emss = 0;
}

int
negative_advice(int status)
{

	return (status == CPL_ERR_RTX_NEG_ADVICE ||
	    status == CPL_ERR_PERSIST_NEG_ADVICE ||
	    status == CPL_ERR_KEEPALV_NEG_ADVICE);
}

static int
alloc_tid_tab(struct tid_info *t, int flags)
{

	MPASS(t->ntids > 0);
	MPASS(t->tid_tab == NULL);

	t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->tid_tab == NULL)
		return (ENOMEM);
	atomic_store_rel_int(&t->tids_in_use, 0);

	return (0);
}

static void
free_tid_tab(struct tid_info *t)
{

	KASSERT(t->tids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->tids_in_use));

	free(t->tid_tab, M_CXGBE);
	t->tid_tab = NULL;
}

static int
alloc_stid_tab(struct tid_info *t, int flags)
{

	MPASS(t->nstids > 0);
	MPASS(t->stid_tab == NULL);

	t->stid_tab = malloc(t->nstids * sizeof(*t->stid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->stid_tab == NULL)
		return (ENOMEM);
	mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
	t->stids_in_use = 0;
	TAILQ_INIT(&t->stids);
	t->nstids_free_head = t->nstids;

	return (0);
}

static void
free_stid_tab(struct tid_info *t)
{

	KASSERT(t->stids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->stids_in_use));

	if (mtx_initialized(&t->stid_lock))
		mtx_destroy(&t->stid_lock);
	free(t->stid_tab, M_CXGBE);
	t->stid_tab = NULL;
}

static void
free_tid_tabs(struct tid_info *t)
{

	free_tid_tab(t);
	free_stid_tab(t);
}

static int
alloc_tid_tabs(struct tid_info *t)
{
	int rc;

	rc = alloc_tid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	rc = alloc_stid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	return (0);
failed:
	free_tid_tabs(t);
	return (rc);
}

static void
alloc_tcb_history(struct adapter *sc, struct tom_data *td)
{

	if (sc->tids.ntids == 0 || sc->tids.ntids > 1024)
		return;
	rw_init(&td->tcb_history_lock, "TCB history");
	td->tcb_history = malloc(sc->tids.ntids * sizeof(*td->tcb_history),
	    M_CXGBE, M_ZERO | M_NOWAIT);
	td->dupack_threshold = G_DUPACKTHRESH(t4_read_reg(sc, A_TP_PARA_REG0));
}

static void
free_tcb_history(struct adapter *sc, struct tom_data *td)
{
#ifdef INVARIANTS
	int i;

	if (td->tcb_history != NULL) {
		for (i = 0; i < sc->tids.ntids; i++) {
			MPASS(td->tcb_history[i] == NULL);
		}
	}
#endif
	free(td->tcb_history, M_CXGBE);
	if (rw_initialized(&td->tcb_history_lock))
		rw_destroy(&td->tcb_history_lock);
}

static void
free_tom_data(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	KASSERT(TAILQ_EMPTY(&td->toep_list),
	    ("%s: TOE PCB list is not empty.", __func__));
	KASSERT(td->lctx_count == 0,
	    ("%s: lctx hash table is not empty.", __func__));

	t4_free_ppod_region(&td->pr);

	if (td->listen_mask != 0)
		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);

	if (mtx_initialized(&td->unsent_wr_lock))
		mtx_destroy(&td->unsent_wr_lock);
	if (mtx_initialized(&td->lctx_hash_lock))
		mtx_destroy(&td->lctx_hash_lock);
	if (mtx_initialized(&td->toep_list_lock))
		mtx_destroy(&td->toep_list_lock);

	free_tcb_history(sc, td);
	free_tid_tabs(&sc->tids);
	free(td, M_CXGBE);
}

static char *
prepare_pkt(int open_type, uint16_t vtag, struct inpcb *inp, int *pktlen,
    int *buflen)
{
	char *pkt;
	struct tcphdr *th;
	int ipv6, len;
	const int maxlen =
	    max(sizeof(struct ether_header), sizeof(struct ether_vlan_header)) +
	    max(sizeof(struct ip), sizeof(struct ip6_hdr)) +
	    sizeof(struct tcphdr);

	MPASS(open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN);

	pkt = malloc(maxlen, M_CXGBE, M_ZERO | M_NOWAIT);
	if (pkt == NULL)
		return (NULL);

	ipv6 = inp->inp_vflag & INP_IPV6;
	len = 0;

	if (EVL_VLANOFTAG(vtag) == 0xfff) {
		struct ether_header *eh = (void *)pkt;

		if (ipv6)
			eh->ether_type = htons(ETHERTYPE_IPV6);
		else
			eh->ether_type = htons(ETHERTYPE_IP);

		len += sizeof(*eh);
	} else {
		struct ether_vlan_header *evh = (void *)pkt;

		evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evh->evl_tag = htons(vtag);
		if (ipv6)
			evh->evl_proto = htons(ETHERTYPE_IPV6);
		else
			evh->evl_proto = htons(ETHERTYPE_IP);

		len += sizeof(*evh);
	}

	if (ipv6) {
		struct ip6_hdr *ip6 = (void *)&pkt[len];

		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_nxt = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = inp->in6p_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = ip6->ip6_src;
		}

		len += sizeof(*ip6);
	} else {
		struct ip *ip = (void *)&pkt[len];

		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(*ip) >> 2;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_p = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = inp->inp_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = ip->ip_src;
		}

		len += sizeof(*ip);
	}

	th = (void *)&pkt[len];
	if (open_type == OPEN_TYPE_ACTIVE) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = inp->inp_fport;	/* ditto */
	} else if (open_type == OPEN_TYPE_LISTEN) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = th->th_sport;
	}
	len += sizeof(*th);

	*pktlen = *buflen = len;
	return (pkt);
}

const struct offload_settings *
lookup_offload_policy(struct adapter *sc, int open_type, struct mbuf *m,
    uint16_t vtag, struct inpcb *inp)
{
	const struct t4_offload_policy *op;
	char *pkt;
	struct offload_rule *r;
	int i, matched, pktlen, buflen;
	static const struct offload_settings allow_offloading_settings = {
		.offload = 1,
		.rx_coalesce = -1,
		.cong_algo = -1,
		.sched_class = -1,
		.tstamp = -1,
		.sack = -1,
		.nagle = -1,
		.ecn = -1,
		.ddp = -1,
		.tls = -1,
		.txq = -1,
		.rxq = -1,
		.mss = -1,
	};
	static const struct offload_settings disallow_offloading_settings = {
		.offload = 0,
		/* rest is irrelevant when offload is off. */
	};

	rw_assert(&sc->policy_lock, RA_LOCKED);

	/*
	 * If there's no Connection Offloading Policy attached to the device
	 * then we need to return a default static policy.  If
	 * "cop_managed_offloading" is true, then we need to disallow
	 * offloading until a COP is attached to the device.  Otherwise we
	 * allow offloading ...
	 */
	op = sc->policy;
	if (op == NULL) {
		if (sc->tt.cop_managed_offloading)
			return (&disallow_offloading_settings);
		else
			return (&allow_offloading_settings);
	}

	switch (open_type) {
	case OPEN_TYPE_ACTIVE:
	case OPEN_TYPE_LISTEN:
		pkt = prepare_pkt(open_type, vtag, inp, &pktlen, &buflen);
		break;
	case OPEN_TYPE_PASSIVE:
		MPASS(m != NULL);
		pkt = mtod(m, char *);
		MPASS(*pkt == CPL_PASS_ACCEPT_REQ);
		pkt += sizeof(struct cpl_pass_accept_req);
		pktlen = m->m_pkthdr.len - sizeof(struct cpl_pass_accept_req);
		buflen = m->m_len - sizeof(struct cpl_pass_accept_req);
		break;
	default:
		MPASS(0);
		return (&disallow_offloading_settings);
	}

	if (pkt == NULL || pktlen == 0 || buflen == 0)
		return (&disallow_offloading_settings);

	matched = 0;
	r = &op->rule[0];
	for (i = 0; i < op->nrules; i++, r++) {
		if (r->open_type != open_type &&
		    r->open_type != OPEN_TYPE_DONTCARE) {
			continue;
		}
		matched = bpf_filter(r->bpf_prog.bf_insns, pkt, pktlen, buflen);
		if (matched)
			break;
	}

	if (open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN)
		free(pkt, M_CXGBE);

	return (matched ? &r->settings : &disallow_offloading_settings);
}
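
/*
 * Rule evaluation above is first-match-wins: each rule's BPF program runs
 * against a synthesized packet (active/listen opens) or the received one
 * (passive opens), and a connection that matches no rule is not offloaded.
 */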

static void
reclaim_wr_resources(void *arg, int count)
{
	struct tom_data *td = arg;
	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
	struct cpl_act_open_req *cpl;
	u_int opcode, atid, tid;
	struct wrqe *wr;
	struct adapter *sc = td_adapter(td);

	mtx_lock(&td->unsent_wr_lock);
	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
	mtx_unlock(&td->unsent_wr_lock);

	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&twr_list, link);

		cpl = wrtod(wr);
		opcode = GET_OPCODE(cpl);

		switch (opcode) {
		case CPL_ACT_OPEN_REQ:
		case CPL_ACT_OPEN_REQ6:
			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
			CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
			act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
			free(wr, M_CXGBE);
			break;
		case CPL_PASS_ACCEPT_RPL:
			tid = GET_TID(cpl);
			CTR2(KTR_CXGBE, "%s: tid %u ", __func__, tid);
			synack_failure_cleanup(sc, tid);
			free(wr, M_CXGBE);
			break;
		default:
			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
			/* WR not freed here; go look at it with a debugger. */
		}
	}
}

/*
 * Ground control to Major TOM
 * Commencing countdown, engines on
 */
static int
t4_tom_activate(struct adapter *sc)
{
	struct tom_data *td;
	struct toedev *tod;
	struct vi_info *vi;
	int i, rc, v;

	ASSERT_SYNCHRONIZED_OP(sc);

	/* per-adapter softc for TOM */
	td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
	if (td == NULL)
		return (ENOMEM);

	/* List of TOE PCBs and associated lock */
	mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->toep_list);

	/* Listen context */
	mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
	    &td->listen_mask, HASH_NOWAIT);

	/* List of WRs for which L2 resolution failed */
	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
	STAILQ_INIT(&td->unsent_wr_list);
	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);

	/* TID tables */
	rc = alloc_tid_tabs(&sc->tids);
	if (rc != 0)
		goto done;

	rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
	    t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods");
	if (rc != 0)
		goto done;
	t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK,
	    V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);

	alloc_tcb_history(sc, td);

	/* toedev ops */
	tod = &td->tod;
	init_toedev(tod);
	tod->tod_softc = sc;
	tod->tod_connect = t4_connect;
	tod->tod_listen_start = t4_listen_start;
	tod->tod_listen_stop = t4_listen_stop;
	tod->tod_rcvd = t4_rcvd;
	tod->tod_output = t4_tod_output;
	tod->tod_send_rst = t4_send_rst;
	tod->tod_send_fin = t4_send_fin;
	tod->tod_pcb_detach = t4_pcb_detach;
	tod->tod_l2_update = t4_l2_update;
	tod->tod_syncache_added = t4_syncache_added;
	tod->tod_syncache_removed = t4_syncache_removed;
	tod->tod_syncache_respond = t4_syncache_respond;
	tod->tod_offload_socket = t4_offload_socket;
	tod->tod_ctloutput = t4_ctloutput;
	tod->tod_tcp_info = t4_tcp_info;
#ifdef KERN_TLS
	tod->tod_alloc_tls_session = t4_alloc_tls_session;
#endif

	for_each_port(sc, i) {
		for_each_vi(sc->port[i], v, vi) {
			TOEDEV(vi->ifp) = &td->tod;
		}
	}

	sc->tom_softc = td;
	register_toedev(sc->tom_softc);

done:
	if (rc != 0)
		free_tom_data(sc, td);
	return (rc);
}

static int
t4_tom_deactivate(struct adapter *sc)
{
	int rc = 0;
	struct tom_data *td = sc->tom_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (td == NULL)
		return (0);	/* XXX. KASSERT? */

	if (sc->offload_map != 0)
		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */

	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */

	mtx_lock(&td->toep_list_lock);
	if (!TAILQ_EMPTY(&td->toep_list))
		rc = EBUSY;
	mtx_unlock(&td->toep_list_lock);

	mtx_lock(&td->lctx_hash_lock);
	if (td->lctx_count > 0)
		rc = EBUSY;
	mtx_unlock(&td->lctx_hash_lock);

	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
	mtx_lock(&td->unsent_wr_lock);
	if (!STAILQ_EMPTY(&td->unsent_wr_list))
		rc = EBUSY;
	mtx_unlock(&td->unsent_wr_lock);

	if (rc == 0) {
		unregister_toedev(sc->tom_softc);
		free_tom_data(sc, td);
		sc->tom_softc = NULL;
	}

	return (rc);
}

static int
t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	int error;

	if (ulp_mode(toep) == ULP_MODE_TCPDDP) {
		error = t4_aio_queue_ddp(so, job);
		if (error != EOPNOTSUPP)
			return (error);
	}

	return (t4_aio_queue_aiotx(so, job));
}

static int
t4_ctloutput_tom(struct socket *so, struct sockopt *sopt)
{

	if (sopt->sopt_level != IPPROTO_TCP)
		return (tcp_ctloutput(so, sopt));

	switch (sopt->sopt_name) {
	case TCP_TLSOM_SET_TLS_CONTEXT:
	case TCP_TLSOM_GET_TLS_TOM:
	case TCP_TLSOM_CLR_TLS_TOM:
	case TCP_TLSOM_CLR_QUIES:
		return (t4_ctloutput_tls(so, sopt));
	default:
		return (tcp_ctloutput(so, sopt));
	}
}

static int
t4_tom_mod_load(void)
{
	struct protosw *tcp_protosw, *tcp6_protosw;

	/* CPL handlers */
	t4_register_cpl_handler(CPL_GET_TCB_RPL, do_get_tcb_rpl);
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl2,
	    CPL_COOKIE_TOM);
	t4_init_connect_cpl_handlers();
	t4_init_listen_cpl_handlers();
	t4_init_cpl_io_handlers();

	t4_ddp_mod_load();
	t4_tls_mod_load();

	tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
	if (tcp_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp_protosw, &toe_protosw, sizeof(toe_protosw));
	bcopy(tcp_protosw->pr_usrreqs, &toe_usrreqs, sizeof(toe_usrreqs));
	toe_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe_protosw.pr_usrreqs = &toe_usrreqs;

	tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM);
	if (tcp6_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw));
	bcopy(tcp6_protosw->pr_usrreqs, &toe6_usrreqs, sizeof(toe6_usrreqs));
	toe6_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe6_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe6_protosw.pr_usrreqs = &toe6_usrreqs;

	return (t4_register_uld(&tom_uld_info));
}
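
/*
 * The protosw copies set up above are what offload_socket() installs when
 * it switches so->so_proto to toe_protosw/toe6_protosw: offloaded sockets
 * get TOM-specific aio_queue and ctloutput paths and inherit everything
 * else from the stock TCP protosw.
 */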

static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
		return;

	/* Try to free resources (works only if no port has IFCAP_TOE) */
	if (uld_active(sc, ULD_TOM))
		t4_deactivate_uld(sc, ULD_TOM);

	end_synchronized_op(sc, 0);
}

static int
t4_tom_mod_unload(void)
{
	t4_iterate(tom_uninit, NULL);

	if (t4_unregister_uld(&tom_uld_info) == EBUSY)
		return (EBUSY);

	t4_tls_mod_unload();
	t4_ddp_mod_unload();

	t4_uninit_connect_cpl_handlers();
	t4_uninit_listen_cpl_handlers();
	t4_uninit_cpl_io_handlers();
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, NULL, CPL_COOKIE_TOM);
	t4_register_cpl_handler(CPL_GET_TCB_RPL, NULL);

	return (0);
}
#endif	/* TCP_OFFLOAD */

static int
t4_tom_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = t4_tom_mod_load();
		break;
	case MOD_UNLOAD:
		rc = t4_tom_mod_unload();
		break;
	default:
		rc = EINVAL;
	}
#else
	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif

	return (rc);
}

static moduledata_t t4_tom_moddata = {
	"t4_tom",
	t4_tom_modevent,
	0
};

MODULE_VERSION(t4_tom, 1);
MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);