/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2007, Myricom Inc.
 * Copyright (c) 2008, Intel Corporation.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2016-2021 Mellanox Technologies.
 * All rights reserved.
 *
 * Portions of this software were developed by Bjoern Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockbuf.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/vnet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/infiniband.h>
#include <net/if_lagg.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_fsm.h>
#include <netinet/udp.h>
#include <netinet6/ip6_var.h>

#include <machine/in_cksum.h>
static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");

#define	TCP_LRO_TS_OPTION \
    ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
	  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
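
/*
 * For reference: TCP_LRO_TS_OPTION is the first 32 bits of the TCP
 * option area when a segment carries only the RFC 7323 timestamp
 * option in its recommended alignment (two NOPs followed by kind 8,
 * length 10):
 *
 *	byte 0: TCPOPT_NOP (1)
 *	byte 1: TCPOPT_NOP (1)
 *	byte 2: TCPOPT_TIMESTAMP (8)
 *	byte 3: TCPOLEN_TIMESTAMP (10)
 *
 * Comparing this single word against the start of the options lets the
 * code below recognize timestamp-only segments (TCPOLEN_TSTAMP_APPA
 * bytes of options) with one load and one compare.
 */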
static void	tcp_lro_rx_done(struct lro_ctrl *lc);
static int	tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m,
		    uint32_t csum, bool use_hash);

static bool	do_bpf_strip_and_compress(struct tcpcb *, struct lro_ctrl *,
		    struct lro_entry *, struct mbuf **, struct mbuf **, struct mbuf **,
		    bool *, bool, bool, struct ifnet *, bool);
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP LRO");
static long tcplro_stacks_wanting_mbufq;
counter_u64_t tcp_inp_lro_direct_queue;
counter_u64_t tcp_inp_lro_wokeup_queue;
counter_u64_t tcp_inp_lro_compressed;
counter_u64_t tcp_inp_lro_locks_taken;
counter_u64_t tcp_extra_mbuf;
counter_u64_t tcp_would_have_but;
counter_u64_t tcp_comp_total;
counter_u64_t tcp_uncomp_total;
counter_u64_t tcp_bad_csums;
static unsigned	tcp_lro_entries = TCP_LRO_ENTRIES;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
    "default number of LRO entries");

static uint32_t tcp_lro_cpu_set_thresh = TCP_LRO_CPU_DECLARATION_THRESH;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, lro_cpu_threshold,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_cpu_set_thresh, 0,
    "Number of interrupts in a row on the same CPU that will make us declare an 'affinity' CPU");

static uint32_t tcp_less_accurate_lro_ts = 0;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, lro_less_accurate,
    CTLFLAG_MPSAFE, &tcp_less_accurate_lro_ts, 0,
    "Trade timestamp accuracy for efficiency by doing fewer timestamp operations");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, fullqueue, CTLFLAG_RD,
    &tcp_inp_lro_direct_queue, "Number of lro's fully queued to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, wokeup, CTLFLAG_RD,
    &tcp_inp_lro_wokeup_queue, "Number of lro's where we woke up transport via hpts");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, compressed, CTLFLAG_RD,
    &tcp_inp_lro_compressed, "Number of lro's compressed and sent to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lockcnt, CTLFLAG_RD,
    &tcp_inp_lro_locks_taken, "Number of lro's inp_wlocks taken");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, extra_mbuf, CTLFLAG_RD,
    &tcp_extra_mbuf, "Number of times we had an extra compressed ack dropped into the tp");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, would_have_but, CTLFLAG_RD,
    &tcp_would_have_but, "Number of times we would have had an extra compressed, but mget failed");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, with_m_ackcmp, CTLFLAG_RD,
    &tcp_comp_total, "Number of mbufs queued with M_ACKCMP flags set");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, without_m_ackcmp, CTLFLAG_RD,
    &tcp_uncomp_total, "Number of mbufs queued without M_ACKCMP");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lro_badcsum, CTLFLAG_RD,
    &tcp_bad_csums, "Number of packets that the common code saw with bad csums");
void
tcp_lro_reg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, 1);
}

void
tcp_lro_dereg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, -1);
}
static void
tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
    struct lro_entry *le)
{
	LIST_INSERT_HEAD(&lc->lro_active, le, next);
	LIST_INSERT_HEAD(bucket, le, hash_next);
}

static void
tcp_lro_active_remove(struct lro_entry *le)
{
	LIST_REMOVE(le, next);		/* active list */
	LIST_REMOVE(le, hash_next);	/* hash bucket */
}
int
tcp_lro_init(struct lro_ctrl *lc)
{
	return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
}
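
/*
 * Illustrative driver-side use of this API (a sketch only; the receive
 * loop and the "sc"/"rx_next_packet" names are hypothetical):
 *
 *	struct lro_ctrl lro;
 *
 *	if (tcp_lro_init(&lro) != 0)
 *		return (ENOMEM);
 *	lro.ifp = ifp;
 *	...
 *	while ((m = rx_next_packet(sc)) != NULL) {
 *		if (tcp_lro_rx(&lro, m, 0) != 0)
 *			(*ifp->if_input)(ifp, m);	// LRO refused it
 *	}
 *	tcp_lro_flush_all(&lro);	// end of the interrupt batch
 *	...
 *	tcp_lro_free(&lro);		// at detach time
 */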
int
tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
    unsigned lro_entries, unsigned lro_mbufs)
{
	struct lro_entry *le;
	size_t size;
	unsigned i, elements;

	lc->lro_bad_csum = 0;
	lc->lro_queued = 0;
	lc->lro_flushed = 0;
	lc->lro_mbuf_count = 0;
	lc->lro_mbuf_max = lro_mbufs;
	lc->lro_cnt = lro_entries;
	lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
	lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
	lc->ifp = ifp;
	LIST_INIT(&lc->lro_free);
	LIST_INIT(&lc->lro_active);

	/* create hash table to accelerate entry lookup */
	if (lro_entries > lro_mbufs)
		elements = lro_entries;
	else
		elements = lro_mbufs;
	lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
	    HASH_NOWAIT);
	if (lc->lro_hash == NULL) {
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}

	/* compute size to allocate */
	size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
	    (lro_entries * sizeof(*le));
	lc->lro_mbuf_data = (struct lro_mbuf_sort *)
	    malloc(size, M_LRO, M_NOWAIT | M_ZERO);

	/* check for out of memory */
	if (lc->lro_mbuf_data == NULL) {
		free(lc->lro_hash, M_LRO);
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}
	/* compute offset for LRO entries */
	le = (struct lro_entry *)
	    (lc->lro_mbuf_data + lro_mbufs);

	/* setup linked list */
	for (i = 0; i != lro_entries; i++)
		LIST_INSERT_HEAD(&lc->lro_free, le + i, next);

	return (0);
}
struct vxlan_header {
	uint32_t	vxlh_flags;
	uint32_t	vxlh_vni;
};

static inline void *
tcp_lro_low_level_parser(void *ptr, struct lro_parser *parser, bool update_data, bool is_vxlan, int mlen)
{
	const struct ether_vlan_header *eh;
	void *old;
	uint16_t eth_type;

	if (update_data)
		memset(parser, 0, sizeof(*parser));

	old = ptr;

	if (is_vxlan) {
		const struct vxlan_header *vxh;
		vxh = ptr;
		ptr = (uint8_t *)ptr + sizeof(*vxh);
		if (update_data) {
			parser->data.vxlan_vni =
			    vxh->vxlh_vni & htonl(0xffffff00);
		}
	}

	eh = ptr;
	if (__predict_false(eh->evl_encap_proto == htons(ETHERTYPE_VLAN))) {
		eth_type = eh->evl_proto;
		if (update_data) {
			/* strip priority and keep VLAN ID only */
			parser->data.vlan_id = eh->evl_tag & htons(EVL_VLID_MASK);
		}
		/* advance to next header */
		ptr = (uint8_t *)ptr + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		mlen -= (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	} else {
		eth_type = eh->evl_encap_proto;
		/* advance to next header */
		mlen -= ETHER_HDR_LEN;
		ptr = (uint8_t *)ptr + ETHER_HDR_LEN;
	}
	if (__predict_false(mlen <= 0))
		return (NULL);
	switch (eth_type) {
#ifdef INET
	case htons(ETHERTYPE_IP):
		parser->ip4 = ptr;
		if (__predict_false(mlen < sizeof(struct ip)))
			return (NULL);
		/* Ensure there are no IPv4 options. */
		if ((parser->ip4->ip_hl << 2) != sizeof (*parser->ip4))
			break;
		/* .. and the packet is not fragmented. */
		if (parser->ip4->ip_off & htons(IP_MF|IP_OFFMASK))
			break;
		/* .. and the packet has valid src/dst addrs */
		if (__predict_false(parser->ip4->ip_src.s_addr == INADDR_ANY ||
			parser->ip4->ip_dst.s_addr == INADDR_ANY))
			break;
		ptr = (uint8_t *)ptr + (parser->ip4->ip_hl << 2);
		mlen -= sizeof(struct ip);
		if (update_data) {
			parser->data.s_addr.v4 = parser->ip4->ip_src;
			parser->data.d_addr.v4 = parser->ip4->ip_dst;
		}
		switch (parser->ip4->ip_p) {
		case IPPROTO_UDP:
			if (__predict_false(mlen < sizeof(struct udphdr)))
				return (NULL);
			parser->udp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV4_UDP;
				parser->data.s_port = parser->udp->uh_sport;
				parser->data.d_port = parser->udp->uh_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_UDP);
			}
			ptr = ((uint8_t *)ptr + sizeof(*parser->udp));
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		case IPPROTO_TCP:
			parser->tcp = ptr;
			if (__predict_false(mlen < sizeof(struct tcphdr)))
				return (NULL);
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV4_TCP;
				parser->data.s_port = parser->tcp->th_sport;
				parser->data.d_port = parser->tcp->th_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_TCP);
			}
			if (__predict_false(mlen < (parser->tcp->th_off << 2)))
				return (NULL);
			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		default:
			break;
		}
		break;
#endif
#ifdef INET6
	case htons(ETHERTYPE_IPV6):
		parser->ip6 = ptr;
		if (__predict_false(mlen < sizeof(struct ip6_hdr)))
			return (NULL);
		/* Ensure the packet has valid src/dst addrs */
		if (__predict_false(IN6_IS_ADDR_UNSPECIFIED(&parser->ip6->ip6_src) ||
			IN6_IS_ADDR_UNSPECIFIED(&parser->ip6->ip6_dst)))
			return (NULL);
		ptr = (uint8_t *)ptr + sizeof(*parser->ip6);
		if (update_data) {
			parser->data.s_addr.v6 = parser->ip6->ip6_src;
			parser->data.d_addr.v6 = parser->ip6->ip6_dst;
		}
		mlen -= sizeof(struct ip6_hdr);
		switch (parser->ip6->ip6_nxt) {
		case IPPROTO_UDP:
			if (__predict_false(mlen < sizeof(struct udphdr)))
				return (NULL);
			parser->udp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV6_UDP;
				parser->data.s_port = parser->udp->uh_sport;
				parser->data.d_port = parser->udp->uh_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_UDP);
			}
			ptr = (uint8_t *)ptr + sizeof(*parser->udp);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		case IPPROTO_TCP:
			if (__predict_false(mlen < sizeof(struct tcphdr)))
				return (NULL);
			parser->tcp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV6_TCP;
				parser->data.s_port = parser->tcp->th_sport;
				parser->data.d_port = parser->tcp->th_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_TCP);
			}
			if (__predict_false(mlen < (parser->tcp->th_off << 2)))
				return (NULL);
			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		default:
			break;
		}
		break;
#endif
	default:
		break;
	}
	/* Invalid packet - cannot parse */
	return (NULL);
}
static const int vxlan_csum = CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID;

static inline struct lro_parser *
tcp_lro_parser(struct mbuf *m, struct lro_parser *po, struct lro_parser *pi, bool update_data)
{
	void *data_ptr;

	/* Try to parse outer headers first. */
	data_ptr = tcp_lro_low_level_parser(m->m_data, po, update_data, false, m->m_len);
	if (data_ptr == NULL || po->total_hdr_len > m->m_len)
		return (NULL);

	if (update_data) {
		/* Store VLAN ID, if any. */
		if (__predict_false(m->m_flags & M_VLANTAG)) {
			po->data.vlan_id =
			    htons(m->m_pkthdr.ether_vtag) & htons(EVL_VLID_MASK);
		}
		/* Store decrypted flag, if any. */
		if (__predict_false((m->m_pkthdr.csum_flags &
		    CSUM_TLS_MASK) == CSUM_TLS_DECRYPTED))
			po->data.lro_flags |= LRO_FLAG_DECRYPTED;
	}

	switch (po->data.lro_type) {
	case LRO_TYPE_IPV4_UDP:
	case LRO_TYPE_IPV6_UDP:
		/* Check for VXLAN headers. */
		if ((m->m_pkthdr.csum_flags & vxlan_csum) != vxlan_csum)
			break;

		/* Try to parse inner headers. */
		data_ptr = tcp_lro_low_level_parser(data_ptr, pi, update_data, true,
		    (m->m_len - ((caddr_t)data_ptr - m->m_data)));
		if (data_ptr == NULL || (pi->total_hdr_len + po->total_hdr_len) > m->m_len)
			break;

		/* Verify supported header types. */
		switch (pi->data.lro_type) {
		case LRO_TYPE_IPV4_TCP:
		case LRO_TYPE_IPV6_TCP:
			return (pi);
		default:
			break;
		}
		break;
	case LRO_TYPE_IPV4_TCP:
	case LRO_TYPE_IPV6_TCP:
		if (update_data)
			memset(pi, 0, sizeof(*pi));
		return (po);
	default:
		break;
	}
	return (NULL);
}
static inline int
tcp_lro_trim_mbuf_chain(struct mbuf *m, const struct lro_parser *po)
{
	int len;

	switch (po->data.lro_type) {
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		len = ((uint8_t *)po->ip4 - (uint8_t *)m->m_data) +
		    ntohs(po->ip4->ip_len);
		break;
#endif
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		len = ((uint8_t *)po->ip6 - (uint8_t *)m->m_data) +
		    ntohs(po->ip6->ip6_plen) + sizeof(*po->ip6);
		break;
#endif
	default:
		return (TCP_LRO_CANNOT);
	}

	/*
	 * If the frame is padded beyond the end of the IP packet,
	 * then trim the extra bytes off:
	 */
	if (__predict_true(m->m_pkthdr.len == len)) {
		return (0);
	} else if (m->m_pkthdr.len > len) {
		m_adj(m, len - m->m_pkthdr.len);
		return (0);
	}
	return (TCP_LRO_CANNOT);
}
static struct tcphdr *
tcp_lro_get_th(struct mbuf *m)
{
	return ((struct tcphdr *)((uint8_t *)m->m_data + m->m_pkthdr.lro_tcp_h_off));
}

static void
lro_free_mbuf_chain(struct mbuf *m)
{
	struct mbuf *save;

	while (m) {
		save = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
		m = save;
	}
}
void
tcp_lro_free(struct lro_ctrl *lc)
{
	struct lro_entry *le;
	unsigned x;

	/* reset LRO free list */
	LIST_INIT(&lc->lro_free);

	/* free active mbufs, if any */
	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		lro_free_mbuf_chain(le->m_head);
	}

	/* free hash table */
	free(lc->lro_hash, M_LRO);
	lc->lro_hash = NULL;
	lc->lro_hashsz = 0;

	/* free mbuf array, if any */
	for (x = 0; x != lc->lro_mbuf_count; x++)
		m_freem(lc->lro_mbuf_data[x].mb);
	lc->lro_mbuf_count = 0;

	/* free allocated memory, if any */
	free(lc->lro_mbuf_data, M_LRO);
	lc->lro_mbuf_data = NULL;
}
static uint16_t
tcp_lro_rx_csum_tcphdr(const struct tcphdr *th)
{
	const uint16_t *ptr;
	uint32_t csum;
	uint16_t len;

	csum = -th->th_sum;	/* exclude checksum field */
	len = th->th_off;
	ptr = (const uint16_t *)th;
	while (len--) {
		csum += *ptr;
		ptr++;
		csum += *ptr;
		ptr++;
	}
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	return (csum);
}
static uint16_t
tcp_lro_rx_csum_data(const struct lro_parser *pa, uint16_t tcp_csum)
{
	uint32_t c;
	uint16_t cs;

	c = tcp_csum;

	switch (pa->data.lro_type) {
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		/* Compute full pseudo IPv6 header checksum. */
		cs = in6_cksum_pseudo(pa->ip6, ntohs(pa->ip6->ip6_plen), pa->ip6->ip6_nxt, 0);
		break;
#endif
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		/* Compute full pseudo IPv4 header checksum. */
		cs = in_addword(ntohs(pa->ip4->ip_len) - sizeof(*pa->ip4), IPPROTO_TCP);
		cs = in_pseudo(pa->ip4->ip_src.s_addr, pa->ip4->ip_dst.s_addr, htons(cs));
		break;
#endif
	default:
		cs = 0;		/* Keep compiler happy. */
		break;
	}

	/* Complement checksum. */
	cs = ~cs;
	c += cs;

	/* Remove TCP header checksum. */
	cs = ~tcp_lro_rx_csum_tcphdr(pa->tcp);
	c += cs;

	/* Compute checksum remainder. */
	while (c > 0xffff)
		c = (c >> 16) + (c & 0xffff);

	return (c);
}
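
/*
 * A note on the folding loops used throughout this file: 16-bit one's
 * complement sums are accumulated in a 32-bit variable and the carry
 * is repeatedly folded back into the low 16 bits.  For example
 * (illustrative): a running sum of 0x2fffe folds to 0x2 + 0xfffe =
 * 0x10000, which folds once more to 0x1 + 0x0000 = 0x0001.  Two folds
 * are always sufficient, hence the short "while" loops.
 */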
static void
tcp_lro_rx_done(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		tcp_lro_flush(lc, le);
	}
}
static void
tcp_lro_flush_active(struct lro_ctrl *lc)
{
	struct lro_entry *le, *le_tmp;

	/*
	 * Walk through the list of le entries and flush any one
	 * that has packets pending.  This is called because we have
	 * an inbound packet (e.g. SYN) that has to have all others
	 * flushed in front of it.  Note we have to do the remove
	 * because tcp_lro_flush() assumes that the entry is being
	 * freed.  This is OK; it will just get reallocated again
	 * like it was new.  The safe iterator is needed because
	 * tcp_lro_flush() reinserts "le" into the free list via the
	 * same "next" linkage.
	 */
	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
		if (le->m_head != NULL) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}
void
tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
{
	struct lro_entry *le, *le_tmp;
	uint64_t now, tov;
	struct bintime bt;

	if (LIST_EMPTY(&lc->lro_active))
		return;

	/* get timeout time and current time in ns */
	binuptime(&bt);
	now = bintime2ns(&bt);
	tov = ((timeout->tv_sec * 1000000000) + (timeout->tv_usec * 1000));
	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
		if (now >= (bintime2ns(&le->alloc_time) + tov)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}
#ifdef INET
static int
tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4)
{
	uint16_t csum;

	/* Legacy IP has a header checksum that needs to be correct. */
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		if (__predict_false((m->m_pkthdr.csum_flags & CSUM_IP_VALID) == 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	} else {
		csum = in_cksum_hdr(ip4);
		if (__predict_false(csum != 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	}
	return (0);
}
#endif
static void
tcp_lro_log(struct tcpcb *tp, const struct lro_ctrl *lc,
    const struct lro_entry *le, const struct mbuf *m,
    int frm, int32_t tcp_data_len, uint32_t th_seq,
    uint32_t th_ack, uint16_t th_win)
{
	if (tcp_bblogging_on(tp)) {
		union tcp_log_stackspecific log;
		struct timeval tv, btv;
		uint32_t cts;

		cts = tcp_get_usecs(&tv);
		memset(&log, 0, sizeof(union tcp_log_stackspecific));
		log.u_bbr.flex8 = frm;
		log.u_bbr.flex1 = tcp_data_len;
		if (m)
			log.u_bbr.flex2 = m->m_pkthdr.len;
		else
			log.u_bbr.flex2 = 0;
		if (le->m_head) {
			log.u_bbr.flex3 = le->m_head->m_pkthdr.lro_nsegs;
			log.u_bbr.flex4 = le->m_head->m_pkthdr.lro_tcp_d_len;
			log.u_bbr.flex5 = le->m_head->m_pkthdr.len;
			log.u_bbr.delRate = le->m_head->m_flags;
			log.u_bbr.rttProp = le->m_head->m_pkthdr.rcv_tstmp;
		}
		log.u_bbr.inflight = th_seq;
		log.u_bbr.delivered = th_ack;
		log.u_bbr.timeStamp = cts;
		log.u_bbr.epoch = le->next_seq;
		log.u_bbr.lt_epoch = le->ack_seq;
		log.u_bbr.pacing_gain = th_win;
		log.u_bbr.cwnd_gain = le->window;
		log.u_bbr.lost = curcpu;
		log.u_bbr.cur_del_rate = (uintptr_t)m;
		log.u_bbr.bw_inuse = (uintptr_t)le->m_head;
		bintime2timeval(&lc->lro_last_queue_time, &btv);
		log.u_bbr.flex6 = tcp_tv_to_usectick(&btv);
		log.u_bbr.flex7 = le->compressed;
		/* Note: this overwrites the th_win value stored above. */
		log.u_bbr.pacing_gain = le->uncompressed;
		if (in_epoch(net_epoch_preempt))
			log.u_bbr.inhpts = 1;
		else
			log.u_bbr.inhpts = 0;
		TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv,
		    &tptosocket(tp)->so_snd,
		    TCP_LOG_LRO, 0, 0, &log, false, &tv);
	}
}
static inline void
tcp_lro_assign_and_checksum_16(uint16_t *ptr, uint16_t value, uint16_t *psum)
{
	uint32_t csum;

	csum = 0xffff - *ptr + value;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);
	*ptr = value;
	*psum = csum;
}
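
/*
 * The helper above implements a standard incremental checksum update
 * (in the style of RFC 1624): writing a new 16-bit value into a
 * checksummed header changes the one's complement sum by ~old + new,
 * which is exactly what is stored into *psum.  The caller below can
 * therefore patch the enclosing IP/TCP/UDP checksums with these
 * deltas instead of recomputing them from scratch.
 */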
static uint16_t
tcp_lro_update_checksum(const struct lro_parser *pa, const struct lro_entry *le,
    uint16_t payload_len, uint16_t delta_sum)
{
	uint32_t csum;
	uint16_t tlen;
	uint16_t temp[5] = {};

	switch (pa->data.lro_type) {
	case LRO_TYPE_IPV4_TCP:
		/* Compute new IPv4 length. */
		tlen = (pa->ip4->ip_hl << 2) + (pa->tcp->th_off << 2) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);

		/* Subtract delta from current IPv4 checksum. */
		csum = pa->ip4->ip_sum + 0xffff - temp[0];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
		goto update_tcp_header;

	case LRO_TYPE_IPV6_TCP:
		/* Compute new IPv6 length. */
		tlen = (pa->tcp->th_off << 2) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
		goto update_tcp_header;

	case LRO_TYPE_IPV4_UDP:
		/* Compute new IPv4 length. */
		tlen = (pa->ip4->ip_hl << 2) + sizeof(*pa->udp) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);

		/* Subtract delta from current IPv4 checksum. */
		csum = pa->ip4->ip_sum + 0xffff - temp[0];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
		goto update_udp_header;

	case LRO_TYPE_IPV6_UDP:
		/* Compute new IPv6 length. */
		tlen = sizeof(*pa->udp) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
		goto update_udp_header;

	default:
		return (0);
	}

update_tcp_header:
	/* Compute current TCP header checksum. */
	temp[2] = tcp_lro_rx_csum_tcphdr(pa->tcp);

	/* Incorporate the latest ACK into the TCP header. */
	pa->tcp->th_ack = le->ack_seq;
	pa->tcp->th_win = le->window;

	/* Incorporate latest timestamp into the TCP header. */
	if (le->timestamp != 0) {
		uint32_t *ts_ptr;

		ts_ptr = (uint32_t *)(pa->tcp + 1);
		ts_ptr[1] = htonl(le->tsval);
		ts_ptr[2] = le->tsecr;
	}

	/* Compute new TCP header checksum. */
	temp[3] = tcp_lro_rx_csum_tcphdr(pa->tcp);

	/* Compute new TCP checksum. */
	csum = pa->tcp->th_sum + 0xffff - delta_sum +
	    0xffff - temp[0] + 0xffff - temp[3] + temp[2];
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Assign new TCP checksum. */
	tcp_lro_assign_and_checksum_16(&pa->tcp->th_sum, csum, &temp[4]);

	/* Compute all modifications affecting next checksum. */
	csum = temp[0] + temp[1] + 0xffff - temp[2] +
	    temp[3] + temp[4] + delta_sum;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Return delta checksum to next stage, if any. */
	return (csum);

update_udp_header:
	tlen = sizeof(*pa->udp) + payload_len;
	/* Assign new UDP length and compute checksum delta. */
	tcp_lro_assign_and_checksum_16(&pa->udp->uh_ulen, htons(tlen), &temp[2]);

	/* Check if there is a UDP checksum. */
	if (__predict_false(pa->udp->uh_sum != 0)) {
		/* Compute new UDP checksum. */
		csum = pa->udp->uh_sum + 0xffff - delta_sum +
		    0xffff - temp[0] + 0xffff - temp[2];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		/* Assign new UDP checksum. */
		tcp_lro_assign_and_checksum_16(&pa->udp->uh_sum, csum, &temp[3]);
	}

	/* Compute all modifications affecting next checksum. */
	csum = temp[0] + temp[1] + temp[2] + temp[3] + delta_sum;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Return delta checksum to next stage, if any. */
	return (csum);
}
static void
tcp_flush_out_entry(struct lro_ctrl *lc, struct lro_entry *le)
{
	/* Check if we need to recompute any checksums. */
	if (le->needs_merge) {
		uint16_t csum;

		switch (le->inner.data.lro_type) {
		case LRO_TYPE_IPV4_TCP:
			csum = tcp_lro_update_checksum(&le->inner, le,
			    le->m_head->m_pkthdr.lro_tcp_d_len,
			    le->m_head->m_pkthdr.lro_tcp_d_csum);
			csum = tcp_lro_update_checksum(&le->outer, NULL,
			    le->m_head->m_pkthdr.lro_tcp_d_len +
			    le->inner.total_hdr_len, csum);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			le->m_head->m_pkthdr.csum_data = 0xffff;
			if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
				le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
			break;
		case LRO_TYPE_IPV6_TCP:
			csum = tcp_lro_update_checksum(&le->inner, le,
			    le->m_head->m_pkthdr.lro_tcp_d_len,
			    le->m_head->m_pkthdr.lro_tcp_d_csum);
			csum = tcp_lro_update_checksum(&le->outer, NULL,
			    le->m_head->m_pkthdr.lro_tcp_d_len +
			    le->inner.total_hdr_len, csum);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			le->m_head->m_pkthdr.csum_data = 0xffff;
			if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
				le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
			break;
		case LRO_TYPE_NONE:
			switch (le->outer.data.lro_type) {
			case LRO_TYPE_IPV4_TCP:
				csum = tcp_lro_update_checksum(&le->outer, le,
				    le->m_head->m_pkthdr.lro_tcp_d_len,
				    le->m_head->m_pkthdr.lro_tcp_d_csum);
				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
				le->m_head->m_pkthdr.csum_data = 0xffff;
				if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
					le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
				break;
			case LRO_TYPE_IPV6_TCP:
				csum = tcp_lro_update_checksum(&le->outer, le,
				    le->m_head->m_pkthdr.lro_tcp_d_len,
				    le->m_head->m_pkthdr.lro_tcp_d_csum);
				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				le->m_head->m_pkthdr.csum_data = 0xffff;
				if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
					le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Break any chain.  In the singleton case m_nextpkt points at
	 * m_head itself and is not otherwise cleared; in the other
	 * cases tcp_push_and_replace() has already set m_nextpkt to
	 * NULL.
	 */
	le->m_head->m_nextpkt = NULL;
	lc->lro_queued += le->m_head->m_pkthdr.lro_nsegs;
	(*lc->ifp->if_input)(lc->ifp, le->m_head);
}
static void
tcp_set_entry_to_mbuf(struct lro_ctrl *lc, struct lro_entry *le,
    struct mbuf *m, struct tcphdr *th)
{
	uint32_t *ts_ptr;
	uint16_t tcp_data_len;
	uint16_t tcp_opt_len;

	ts_ptr = (uint32_t *)(th + 1);
	tcp_opt_len = (th->th_off << 2);
	tcp_opt_len -= sizeof(*th);

	/* Check if there is a timestamp option. */
	if (tcp_opt_len == 0 ||
	    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
	    *ts_ptr != TCP_LRO_TS_OPTION)) {
		/* We failed to find the timestamp option. */
		le->timestamp = 0;
	} else {
		le->timestamp = 1;
		le->tsval = ntohl(*(ts_ptr + 1));
		le->tsecr = *(ts_ptr + 2);
	}

	tcp_data_len = m->m_pkthdr.lro_tcp_d_len;

	/* Pull out TCP sequence numbers and window size. */
	le->next_seq = ntohl(th->th_seq) + tcp_data_len;
	le->ack_seq = th->th_ack;
	le->window = th->th_win;
	le->flags = tcp_get_flags(th);
	le->needs_merge = 0;

	/* Setup new data pointers. */
	le->m_head = m;
	le->m_tail = m_last(m);
}
static void
tcp_push_and_replace(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m)
{
	struct lro_parser *pa;
	struct mbuf *msave;

	/*
	 * Push up the stack of the current entry
	 * and replace it with "m".
	 */

	/* Grab off the next and save it */
	msave = le->m_head->m_nextpkt;
	le->m_head->m_nextpkt = NULL;

	/* Now push out the old entry */
	tcp_flush_out_entry(lc, le);

	/* Re-parse new header, should not fail. */
	pa = tcp_lro_parser(m, &le->outer, &le->inner, false);
	KASSERT(pa != NULL,
	    ("tcp_push_and_replace: LRO parser failed on m=%p\n", m));

	/*
	 * Now to replace the data properly in the entry
	 * we have to reset the TCP header and
	 * other fields.
	 */
	tcp_set_entry_to_mbuf(lc, le, m, pa->tcp);

	/* Restore the next list */
	m->m_nextpkt = msave;
}
static void
tcp_lro_mbuf_append_pkthdr(struct lro_entry *le, const struct mbuf *p)
{
	struct mbuf *m;
	uint32_t csum;

	m = le->m_head;
	if (m->m_pkthdr.lro_nsegs == 1) {
		/* Compute relative checksum. */
		csum = p->m_pkthdr.lro_tcp_d_csum;
	} else {
		/* Merge TCP data checksums. */
		csum = (uint32_t)m->m_pkthdr.lro_tcp_d_csum +
		    (uint32_t)p->m_pkthdr.lro_tcp_d_csum;
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
	}

	/* Update various counters. */
	m->m_pkthdr.len += p->m_pkthdr.lro_tcp_d_len;
	m->m_pkthdr.lro_tcp_d_csum = csum;
	m->m_pkthdr.lro_tcp_d_len += p->m_pkthdr.lro_tcp_d_len;
	m->m_pkthdr.lro_nsegs += p->m_pkthdr.lro_nsegs;
	le->needs_merge = 1;
}
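
/*
 * Why simple addition works here: lro_tcp_d_csum holds the one's
 * complement sum over the TCP payload bytes only, as produced by
 * tcp_lro_rx_csum_data() (pseudo-header and TCP header contributions
 * removed).  One's complement sums over disjoint byte ranges compose
 * by adding and folding, so two per-segment payload sums merge into
 * the sum of the concatenated payload with no rescan of the data.
 */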
static void
tcp_lro_condense(struct lro_ctrl *lc, struct lro_entry *le)
{
	/*
	 * Walk through the mbuf chain we
	 * have on tap and compress/condense
	 * as required.
	 */
	uint32_t *ts_ptr;
	struct mbuf *m;
	struct tcphdr *th;
	uint32_t tcp_data_len_total;
	uint32_t tcp_data_seg_total;
	uint16_t tcp_data_len;
	uint16_t tcp_opt_len;

	/*
	 * First we must check the lead (m_head)
	 * we must make sure that it is *not*
	 * something that should be sent up
	 * right away (sack etc).
	 */
again:
	m = le->m_head->m_nextpkt;
	if (m == NULL) {
		/* Just one left. */
		return;
	}

	th = tcp_lro_get_th(m);
	tcp_opt_len = (th->th_off << 2);
	tcp_opt_len -= sizeof(*th);
	ts_ptr = (uint32_t *)(th + 1);

	if (tcp_opt_len != 0 && __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
	    *ts_ptr != TCP_LRO_TS_OPTION)) {
		/*
		 * It's not the timestamp. We can't
		 * use this guy as the head.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(lc, le, m);
		goto again;
	}
	if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH)) != 0) {
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment, e.g. FIN.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(lc, le, m);
		goto again;
	}
	while((m = le->m_head->m_nextpkt) != NULL) {
		/*
		 * condense m into le, first
		 * pull m out of the list.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		m->m_nextpkt = NULL;
		/* Setup my data */
		tcp_data_len = m->m_pkthdr.lro_tcp_d_len;
		th = tcp_lro_get_th(m);
		ts_ptr = (uint32_t *)(th + 1);
		tcp_opt_len = (th->th_off << 2);
		tcp_opt_len -= sizeof(*th);
		tcp_data_len_total = le->m_head->m_pkthdr.lro_tcp_d_len + tcp_data_len;
		tcp_data_seg_total = le->m_head->m_pkthdr.lro_nsegs + m->m_pkthdr.lro_nsegs;

		if (tcp_data_seg_total >= lc->lro_ackcnt_lim ||
		    tcp_data_len_total >= lc->lro_length_lim) {
			/* Flush now if appending will result in overflow. */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_opt_len != 0 &&
		    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
		    *ts_ptr != TCP_LRO_TS_OPTION)) {
			/*
			 * Maybe a SACK in the new one? We need to
			 * start all over after flushing the
			 * current le. We will go up to the beginning
			 * and flush it (calling the replace again possibly
			 * or just returning).
			 */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH)) != 0) {
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_opt_len != 0) {
			uint32_t tsval = ntohl(*(ts_ptr + 1));
			/* Make sure timestamp values are increasing. */
			if (TSTMP_GT(le->tsval, tsval)) {
				tcp_push_and_replace(lc, le, m);
				goto again;
			}
			le->tsval = tsval;
			le->tsecr = *(ts_ptr + 2);
		}
		/* Try to append the new segment. */
		if (__predict_false(ntohl(th->th_seq) != le->next_seq ||
		    ((tcp_get_flags(th) & TH_ACK) !=
		    (le->flags & TH_ACK)) ||
		    (tcp_data_len == 0 &&
		    le->ack_seq == th->th_ack &&
		    le->window == th->th_win))) {
			/* Out of order packet, non-ACK + ACK or dup ACK. */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_data_len != 0 ||
		    SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
			le->next_seq += tcp_data_len;
			le->ack_seq = th->th_ack;
			le->window = th->th_win;
			le->needs_merge = 1;
		} else if (th->th_ack == le->ack_seq) {
			if (WIN_GT(th->th_win, le->window)) {
				le->window = th->th_win;
				le->needs_merge = 1;
			}
		}

		if (tcp_data_len == 0) {
			m_freem(m);
			continue;
		}

		/* Merge TCP data checksum and length to head mbuf. */
		tcp_lro_mbuf_append_pkthdr(le, m);

		/*
		 * Adjust the mbuf so that m_data points to the first byte of
		 * the ULP payload.  Adjust the mbuf to avoid complications and
		 * append new segment to existing mbuf chain.
		 */
		m_adj(m, m->m_pkthdr.len - tcp_data_len);
		m_demote_pkthdr(m);
		le->m_tail->m_next = m;
		le->m_tail = m_last(m);
	}
}
static void
tcp_queue_pkts(struct tcpcb *tp, struct lro_entry *le)
{
	INP_WLOCK_ASSERT(tptoinpcb(tp));

	STAILQ_HEAD(, mbuf) q = { le->m_head,
	    &STAILQ_NEXT(le->m_last_mbuf, m_stailqpkt) };
	STAILQ_CONCAT(&tp->t_inqueue, &q);
	le->m_head = NULL;
	le->m_last_mbuf = NULL;
}

static bool
tcp_lro_check_wake_status(struct tcpcb *tp)
{
	if (tp->t_fb->tfb_early_wake_check != NULL)
		return ((tp->t_fb->tfb_early_wake_check)(tp));
	return (false);
}
static struct mbuf *
tcp_lro_get_last_if_ackcmp(struct lro_ctrl *lc, struct lro_entry *le,
    struct tcpcb *tp, int32_t *new_m, bool can_append_old_cmp)
{
	struct mbuf *m;

	/* Look at the last mbuf if any in queue */
	if (can_append_old_cmp) {
		m = STAILQ_LAST(&tp->t_inqueue, mbuf, m_stailqpkt);
		if (m != NULL && (m->m_flags & M_ACKCMP) != 0) {
			if (M_TRAILINGSPACE(m) >= sizeof(struct tcp_ackent)) {
				tcp_lro_log(tp, lc, le, NULL, 23, 0, 0, 0, 0);
				*new_m = 0;
				counter_u64_add(tcp_extra_mbuf, 1);
				return (m);
			} else {
				/* Mark we ran out of space */
				tp->t_flags2 |= TF2_MBUF_L_ACKS;
			}
		}
	}
	/* Decide mbuf size. */
	tcp_lro_log(tp, lc, le, NULL, 21, 0, 0, 0, 0);
	if (tp->t_flags2 & TF2_MBUF_L_ACKS)
		m = m_getcl(M_NOWAIT, MT_DATA, M_ACKCMP | M_PKTHDR);
	else
		m = m_gethdr(M_NOWAIT, MT_DATA);

	if (__predict_false(m == NULL)) {
		counter_u64_add(tcp_would_have_but, 1);
		return (NULL);
	}
	counter_u64_add(tcp_comp_total, 1);
	m->m_pkthdr.rcvif = lc->ifp;
	m->m_flags |= M_ACKCMP;
	*new_m = 1;
	return (m);
}
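
/*
 * For reference: an M_ACKCMP mbuf carries an array of struct
 * tcp_ackent records rather than packet data.  The compression path
 * in do_bpf_strip_and_compress() below appends one record per
 * compressible ACK until M_TRAILINGSPACE() runs out, at which point
 * TF2_MBUF_L_ACKS switches the endpoint to cluster-backed mbufs so
 * that future compressed-ack mbufs have more room.
 */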
static struct tcpcb *
tcp_lro_lookup(struct ifnet *ifp, struct lro_parser *pa)
{
	struct inpcb *inp;

	switch (pa->data.lro_type) {
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		inp = in6_pcblookup(&V_tcbinfo,
		    &pa->data.s_addr.v6,
		    pa->data.s_port,
		    &pa->data.d_addr.v6,
		    pa->data.d_port,
		    INPLOOKUP_WLOCKPCB,
		    ifp);
		break;
#endif
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		inp = in_pcblookup(&V_tcbinfo,
		    pa->data.s_addr.v4,
		    pa->data.s_port,
		    pa->data.d_addr.v4,
		    pa->data.d_port,
		    INPLOOKUP_WLOCKPCB,
		    ifp);
		break;
#endif
	default:
		return (NULL);
	}

	return (intotcpcb(inp));
}
static inline bool
tcp_lro_ack_valid(struct mbuf *m, struct tcphdr *th, uint32_t **ppts, bool *other_opts)
{
	/*
	 * This function returns two bits of valuable information.
	 * a) Is what is present capable of being ack-compressed,
	 *    we can ack-compress if there is no options or just
	 *    a timestamp option, and of course the th_flags must
	 *    be correct as well.
	 * b) Our other options present such as SACK. This is
	 *    used to determine if we want to wakeup or not.
	 */
	bool ret = true;

	switch (th->th_off << 2) {
	case (sizeof(*th) + TCPOLEN_TSTAMP_APPA):
		*ppts = (uint32_t *)(th + 1);
		/* Check if we have only one timestamp option. */
		if (**ppts == TCP_LRO_TS_OPTION)
			*other_opts = false;
		else {
			*other_opts = true;
			ret = false;
		}
		break;
	case (sizeof(*th)):
		/* No options. */
		*ppts = NULL;
		*other_opts = false;
		break;
	default:
		*ppts = NULL;
		*other_opts = true;
		ret = false;
		break;
	}
	/* For ACKCMP we only accept ACK, PUSH, ECE and CWR. */
	if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH | TH_ECE | TH_CWR)) != 0)
		ret = false;
	/* If it has data on it we cannot compress it */
	if (m->m_pkthdr.lro_tcp_d_len)
		ret = false;

	/* ACK flag must be set. */
	if (!(tcp_get_flags(th) & TH_ACK))
		ret = false;
	return (ret);
}
static int
tcp_lro_flush_tcphpts(struct lro_ctrl *lc, struct lro_entry *le)
{
	struct tcpcb *tp;
	struct mbuf **pp, *cmp, *mv_to;
	struct ifnet *lagg_ifp;
	bool bpf_req, lagg_bpf_req, should_wake, can_append_old_cmp;

	/* Check if the packet doesn't belong to our network interface. */
	if ((tcplro_stacks_wanting_mbufq == 0) ||
	    (le->outer.data.vlan_id != 0) ||
	    (le->inner.data.lro_type != LRO_TYPE_NONE))
		return (TCP_LRO_CANNOT);

#ifdef INET6
	/*
	 * Be proactive about unspecified IPv6 address in source. As
	 * we use all-zero to indicate unbounded/unconnected pcb,
	 * unspecified IPv6 address can be used to confuse us.
	 *
	 * Note that packets with unspecified IPv6 destination are
	 * already dropped in ip6_input.
	 */
	if (__predict_false(le->outer.data.lro_type == LRO_TYPE_IPV6_TCP &&
	    IN6_IS_ADDR_UNSPECIFIED(&le->outer.data.s_addr.v6)))
		return (TCP_LRO_CANNOT);

	if (__predict_false(le->inner.data.lro_type == LRO_TYPE_IPV6_TCP &&
	    IN6_IS_ADDR_UNSPECIFIED(&le->inner.data.s_addr.v6)))
		return (TCP_LRO_CANNOT);
#endif
	/* Lookup inp, if any.  Returns locked TCP inpcb. */
	tp = tcp_lro_lookup(lc->ifp,
	    (le->inner.data.lro_type == LRO_TYPE_NONE) ? &le->outer : &le->inner);
	if (tp == NULL)
		return (TCP_LRO_CANNOT);

	counter_u64_add(tcp_inp_lro_locks_taken, 1);

	/* Check if the inp is dead, Jim. */
	if (tp->t_state == TCPS_TIME_WAIT) {
		INP_WUNLOCK(tptoinpcb(tp));
		return (TCP_LRO_CANNOT);
	}
	if (tp->t_lro_cpu == HPTS_CPU_NONE && lc->lro_cpu_is_set == 1)
		tp->t_lro_cpu = lc->lro_last_cpu;
	/* Check if the transport doesn't support the needed optimizations. */
	if ((tp->t_flags2 & (TF2_SUPPORTS_MBUFQ | TF2_MBUF_ACKCMP)) == 0) {
		INP_WUNLOCK(tptoinpcb(tp));
		return (TCP_LRO_CANNOT);
	}

	if (tp->t_flags2 & TF2_MBUF_QUEUE_READY)
		should_wake = false;
	else
		should_wake = true;
	/* Check if packets should be tapped to BPF. */
	bpf_req = bpf_peers_present(lc->ifp->if_bpf);
	lagg_bpf_req = false;
	lagg_ifp = NULL;
	if (lc->ifp->if_type == IFT_IEEE8023ADLAG ||
	    lc->ifp->if_type == IFT_INFINIBANDLAG) {
		struct lagg_port *lp = lc->ifp->if_lagg;
		struct lagg_softc *sc = lp->lp_softc;

		lagg_ifp = sc->sc_ifp;
		if (lagg_ifp != NULL)
			lagg_bpf_req = bpf_peers_present(lagg_ifp->if_bpf);
	}

	/* Strip and compress all the incoming packets. */
	can_append_old_cmp = true;
	cmp = NULL;
	for (pp = &le->m_head; *pp != NULL; ) {
		mv_to = NULL;
		if (do_bpf_strip_and_compress(tp, lc, le, pp,
		    &cmp, &mv_to, &should_wake, bpf_req,
		    lagg_bpf_req, lagg_ifp, can_append_old_cmp) == false) {
			/* Advance to next mbuf. */
			pp = &(*pp)->m_nextpkt;
			/*
			 * Once we have appended we can't look in the pending
			 * inbound packets for a compressed ack to append to.
			 */
			can_append_old_cmp = false;
			/*
			 * Once we append we also need to stop adding to any
			 * compressed ack we were remembering. A new cmp
			 * ack will be required.
			 */
			cmp = NULL;
			tcp_lro_log(tp, lc, le, NULL, 25, 0, 0, 0, 0);
		} else if (mv_to != NULL) {
			/* We are asked to move pp up */
			pp = &mv_to->m_nextpkt;
			tcp_lro_log(tp, lc, le, NULL, 24, 0, 0, 0, 0);
		} else
			tcp_lro_log(tp, lc, le, NULL, 26, 0, 0, 0, 0);
	}
	/* Update "m_last_mbuf", if any. */
	if (pp == &le->m_head)
		le->m_last_mbuf = *pp;
	else
		le->m_last_mbuf = __containerof(pp, struct mbuf, m_nextpkt);

	/* Check if any data mbufs left. */
	if (le->m_head != NULL) {
		counter_u64_add(tcp_inp_lro_direct_queue, 1);
		tcp_lro_log(tp, lc, le, NULL, 22, 1, tp->t_flags2, 0, 1);
		tcp_queue_pkts(tp, le);
	}
	if (should_wake) {
		/* Wakeup */
		counter_u64_add(tcp_inp_lro_wokeup_queue, 1);
		if ((*tp->t_fb->tfb_do_queued_segments)(tp, 0))
			/* TCP cb gone and unlocked. */
			return (0);
	}
	INP_WUNLOCK(tptoinpcb(tp));

	return (0);	/* Success. */
}
void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{
	/* Only optimise if there are multiple packets waiting. */
	int error;

	CURVNET_SET(lc->ifp->if_vnet);
	error = tcp_lro_flush_tcphpts(lc, le);
	if (error != 0) {
		tcp_lro_condense(lc, le);
		tcp_flush_out_entry(lc, le);
	}
	CURVNET_RESTORE();

	lc->lro_flushed++;
	bzero(le, sizeof(*le));
	LIST_INSERT_HEAD(&lc->lro_free, le, next);
}
#define	tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))

/*
 * The tcp_lro_sort() routine is comparable to qsort(), except it has
 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
 * number of elements to sort and 64 is the number of sequence bits
 * available. The algorithm is bit-slicing the 64-bit sequence number,
 * sorting one bit at a time from the most significant bit until the
 * least significant one, skipping the constant bits. This is
 * typically called a radix sort.
 */
static void
tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
{
	struct lro_mbuf_sort temp;
	uint64_t ones;
	uint64_t zeros;
	uint32_t x;
	uint32_t y;

repeat:
	/* for small arrays insertion sort is faster */
	if (size <= 12) {
		for (x = 1; x < size; x++) {
			temp = parray[x];
			for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
				parray[y] = parray[y - 1];
			parray[y] = temp;
		}
		return;
	}

	/* compute sequence bits which are constant */
	ones = 0;
	zeros = 0;
	for (x = 0; x != size; x++) {
		ones |= parray[x].seq;
		zeros |= ~parray[x].seq;
	}

	/* compute bits which are not constant into "ones" */
	ones &= zeros;
	if (ones == 0)
		return;

	/* pick the most significant bit which is not constant */
	ones = tcp_lro_msb_64(ones);

	/*
	 * Move entries having cleared sequence bits to the beginning
	 * of the array:
	 */
	for (x = y = 0; y != size; y++) {
		/* skip set bits */
		if (parray[y].seq & ones)
			continue;
		/* swap entries */
		temp = parray[x];
		parray[x] = parray[y];
		parray[y] = temp;
		x++;
	}

	KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));

	/* sort zeros */
	tcp_lro_sort(parray, x);

	/* sort ones */
	parray += x;
	size -= x;
	goto repeat;
}
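
/*
 * Example of a single partition pass (illustrative): given keys
 * { 0b110, 0b010, 0b101 }, the most significant non-constant bit is
 * 0b100, so the pass moves 0b010 to the front and then sorts the two
 * halves { 0b010 } and { 0b110, 0b101 } independently, recursing on
 * the zeros half and looping on the ones half.
 */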
void
tcp_lro_flush_all(struct lro_ctrl *lc)
{
	uint64_t seq;
	uint64_t nseq;
	unsigned x;

	/* check if no mbufs to flush */
	if (lc->lro_mbuf_count == 0)
		goto done;
	if (lc->lro_cpu_is_set == 0) {
		if (lc->lro_last_cpu == curcpu) {
			lc->lro_cnt_of_same_cpu++;
			/* Have we reached the threshold to declare a cpu? */
			if (lc->lro_cnt_of_same_cpu > tcp_lro_cpu_set_thresh)
				lc->lro_cpu_is_set = 1;
		} else {
			lc->lro_last_cpu = curcpu;
			lc->lro_cnt_of_same_cpu = 0;
		}
	}
	CURVNET_SET(lc->ifp->if_vnet);

	/* get current time */
	binuptime(&lc->lro_last_queue_time);

	/* sort all mbufs according to stream */
	tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);

	/* input data into LRO engine, stream by stream */
	seq = 0;
	for (x = 0; x != lc->lro_mbuf_count; x++) {
		struct mbuf *mb;

		/* get mbuf */
		mb = lc->lro_mbuf_data[x].mb;

		/* get sequence number, masking away the packet index */
		nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);

		/* check for new stream */
		if (seq != nseq) {
			seq = nseq;

			/* flush active streams */
			tcp_lro_rx_done(lc);
		}

		/* add packet to LRO engine */
		if (tcp_lro_rx_common(lc, mb, 0, false) != 0) {
			/* Flush anything we have accumulated */
			tcp_lro_flush_active(lc);
			/* input packet to network layer */
			(*lc->ifp->if_input)(lc->ifp, mb);
			lc->lro_queued++;
			lc->lro_flushed++;
		}
	}
	CURVNET_RESTORE();
done:
	/* flush active streams */
	tcp_lro_rx_done(lc);

	tcp_hpts_softclock();
	lc->lro_mbuf_count = 0;
}
static void
build_ack_entry(struct tcp_ackent *ae, struct tcphdr *th, struct mbuf *m,
    uint32_t *ts_ptr, uint16_t iptos)
{
	/*
	 * Given a TCP ACK, summarize it down into the small TCP ACK
	 * entry.
	 */
	ae->timestamp = m->m_pkthdr.rcv_tstmp;
	ae->flags = 0;
	if (m->m_flags & M_TSTMP_LRO)
		ae->flags |= TSTMP_LRO;
	else if (m->m_flags & M_TSTMP)
		ae->flags |= TSTMP_HDWR;
	ae->seq = ntohl(th->th_seq);
	ae->ack = ntohl(th->th_ack);
	ae->flags |= tcp_get_flags(th);
	if (ts_ptr != NULL) {
		ae->ts_value = ntohl(ts_ptr[1]);
		ae->ts_echo = ntohl(ts_ptr[2]);
		ae->flags |= HAS_TSTMP;
	}
	ae->win = ntohs(th->th_win);
	ae->codepoint = iptos;
}
/*
 * Do BPF tap for either ACK_CMP packets or MBUF QUEUE type packets
 * and strip all but the IPv4/IPv6 header.
 */
static bool
do_bpf_strip_and_compress(struct tcpcb *tp, struct lro_ctrl *lc,
    struct lro_entry *le, struct mbuf **pp, struct mbuf **cmp, struct mbuf **mv_to,
    bool *should_wake, bool bpf_req, bool lagg_bpf_req, struct ifnet *lagg_ifp, bool can_append_old_cmp)
{
	union {
		void *ptr;
		struct ip *ip4;
		struct ip6_hdr *ip6;
	} l3;
	struct mbuf *m;
	struct mbuf *nm;
	struct tcphdr *th;
	struct tcp_ackent *ack_ent;
	uint32_t *ts_ptr;
	int32_t n_mbuf;
	bool other_opts, can_compress;
	uint8_t lro_type;
	uint16_t iptos;
	int tcp_hdr_offset;
	int idx;

	/* Get current mbuf. */
	m = *pp;

	/* Let the BPF see the packet */
	if (__predict_false(bpf_req))
		ETHER_BPF_MTAP(lc->ifp, m);

	if (__predict_false(lagg_bpf_req))
		ETHER_BPF_MTAP(lagg_ifp, m);

	tcp_hdr_offset = m->m_pkthdr.lro_tcp_h_off;
	lro_type = le->inner.data.lro_type;
	switch (lro_type) {
	case LRO_TYPE_NONE:
		lro_type = le->outer.data.lro_type;
		switch (lro_type) {
		case LRO_TYPE_IPV4_TCP:
			tcp_hdr_offset -= sizeof(*le->outer.ip4);
			m->m_pkthdr.lro_etype = ETHERTYPE_IP;
			break;
		case LRO_TYPE_IPV6_TCP:
			tcp_hdr_offset -= sizeof(*le->outer.ip6);
			m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
			break;
		default:
			goto compressed;
		}
		break;
	case LRO_TYPE_IPV4_TCP:
		tcp_hdr_offset -= sizeof(*le->outer.ip4);
		m->m_pkthdr.lro_etype = ETHERTYPE_IP;
		break;
	case LRO_TYPE_IPV6_TCP:
		tcp_hdr_offset -= sizeof(*le->outer.ip6);
		m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
		break;
	default:
		goto compressed;
	}

	MPASS(tcp_hdr_offset >= 0);

	m_adj(m, tcp_hdr_offset);
	m->m_flags |= M_LRO_EHDRSTRP;
	m->m_flags &= ~M_ACKCMP;
	m->m_pkthdr.lro_tcp_h_off -= tcp_hdr_offset;

	th = tcp_lro_get_th(m);

	th->th_sum = 0;		/* TCP checksum is valid. */

	/* Check if ACK can be compressed */
	can_compress = tcp_lro_ack_valid(m, th, &ts_ptr, &other_opts);

	/* Now let's look at the should-wake states */
	if ((other_opts == true) &&
	    ((tp->t_flags2 & TF2_DONT_SACK_QUEUE) == 0)) {
		/*
		 * If there are other options (SACK?) and the
		 * tcp endpoint has not expressly told us it does
		 * not care about SACKs, then we should wake up.
		 */
		*should_wake = true;
	} else if (*should_wake == false) {
		/* Wakeup override check if we are false here  */
		*should_wake = tcp_lro_check_wake_status(tp);
	}
	/* Is the ack compressible? */
	if (can_compress == false)
		goto done;
	/* Does the TCP endpoint support ACK compression? */
	if ((tp->t_flags2 & TF2_MBUF_ACKCMP) == 0)
		goto done;

	/* Let's get the TOS/traffic class field */
	l3.ptr = mtod(m, void *);
	switch (lro_type) {
	case LRO_TYPE_IPV4_TCP:
		iptos = l3.ip4->ip_tos;
		break;
	case LRO_TYPE_IPV6_TCP:
		iptos = IPV6_TRAFFIC_CLASS(l3.ip6);
		break;
	default:
		iptos = 0;	/* Keep compiler happy. */
		break;
	}
	/* Now let's get space if we don't have some already */
	if (*cmp == NULL) {
new_one:
		nm = tcp_lro_get_last_if_ackcmp(lc, le, tp, &n_mbuf,
		    can_append_old_cmp);
		if (__predict_false(nm == NULL))
			goto done;
		*cmp = nm;
		if (n_mbuf) {
			/*
			 * Link in the new cmp ack to our in-order place,
			 * first set our cmp ack's next to where we are.
			 */
			nm->m_nextpkt = m;
			(*pp) = nm;
			/*
			 * Set it up so mv_to is advanced to our
			 * compressed ack. This way the caller can
			 * advance pp to the right place.
			 */
			*mv_to = nm;
			/*
			 * Advance it here locally as well.
			 */
			pp = &nm->m_nextpkt;
		}
	} else {
		/* We have one already we are working on */
		nm = *cmp;
		if (M_TRAILINGSPACE(nm) < sizeof(struct tcp_ackent)) {
			/* We ran out of space */
			tp->t_flags2 |= TF2_MBUF_L_ACKS;
			goto new_one;
		}
	}
	MPASS(M_TRAILINGSPACE(nm) >= sizeof(struct tcp_ackent));
	counter_u64_add(tcp_inp_lro_compressed, 1);
	le->compressed++;
	/* We can add in to the one on the tail */
	ack_ent = mtod(nm, struct tcp_ackent *);
	idx = (nm->m_len / sizeof(struct tcp_ackent));
	build_ack_entry(&ack_ent[idx], th, m, ts_ptr, iptos);

	/* Bump the size of both pkt-hdr and len */
	nm->m_len += sizeof(struct tcp_ackent);
	nm->m_pkthdr.len += sizeof(struct tcp_ackent);
compressed:
	/* Advance to next mbuf before freeing. */
	*pp = m->m_nextpkt;
	m->m_nextpkt = NULL;
	m_freem(m);
	return (true);
done:
	counter_u64_add(tcp_uncomp_total, 1);
	le->uncompressed++;
	return (false);
}
static struct lro_head *
tcp_lro_rx_get_bucket(struct lro_ctrl *lc, struct mbuf *m, struct lro_parser *parser)
{
	u_long hash;

	if (M_HASHTYPE_ISHASH(m)) {
		hash = m->m_pkthdr.flowid;
	} else {
		for (unsigned i = hash = 0; i != LRO_RAW_ADDRESS_MAX; i++)
			hash += parser->data.raw[i];
	}
	return (&lc->lro_hash[hash % lc->lro_hashsz]);
}
static int
tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, bool use_hash)
{
	struct lro_parser pi;	/* inner address data */
	struct lro_parser po;	/* outer address data */
	struct lro_parser *pa;	/* current parser for TCP stream */
	struct lro_entry *le;
	struct lro_head *bucket;
	struct tcphdr *th;
	int tcp_data_len;
	int tcp_opt_len;
	int error;
	uint16_t tcp_data_sum;

#ifdef INET
	/* Quickly decide if packet cannot be LRO'ed */
	if (__predict_false(V_ipforwarding != 0))
		return (TCP_LRO_CANNOT);
#endif
#ifdef INET6
	/* Quickly decide if packet cannot be LRO'ed */
	if (__predict_false(V_ip6_forwarding != 0))
		return (TCP_LRO_CANNOT);
#endif
	if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
	    ((CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) ||
	    (m->m_pkthdr.csum_data != 0xffff)) {
		/*
		 * The checksum either did not have hardware offload
		 * or it was a bad checksum. We can't LRO such
		 * a packet.
		 */
		counter_u64_add(tcp_bad_csums, 1);
		return (TCP_LRO_CANNOT);
	}
	/* We expect a contiguous header [eh, ip, tcp]. */
	pa = tcp_lro_parser(m, &po, &pi, true);
	if (__predict_false(pa == NULL))
		return (TCP_LRO_NOT_SUPPORTED);

	/* We don't expect any padding. */
	error = tcp_lro_trim_mbuf_chain(m, pa);
	if (__predict_false(error != 0))
		return (error);

#ifdef INET
	switch (pa->data.lro_type) {
	case LRO_TYPE_IPV4_TCP:
		error = tcp_lro_rx_ipv4(lc, m, pa->ip4);
		if (__predict_false(error != 0))
			return (error);
		break;
	default:
		break;
	}
#endif
	/* If no hardware or arrival stamp on the packet add timestamp */
	if ((m->m_flags & (M_TSTMP_LRO | M_TSTMP)) == 0) {
		m->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
		m->m_flags |= M_TSTMP_LRO;
	}

	/* Get pointer to TCP header. */
	th = pa->tcp;

	/* Don't process SYN packets. */
	if (__predict_false(tcp_get_flags(th) & TH_SYN))
		return (TCP_LRO_CANNOT);

	/* Get total TCP header length and compute payload length. */
	tcp_opt_len = (th->th_off << 2);
	tcp_data_len = m->m_pkthdr.len - ((uint8_t *)th -
	    (uint8_t *)m->m_data) - tcp_opt_len;
	tcp_opt_len -= sizeof(*th);

	/* Don't process invalid TCP headers. */
	if (__predict_false(tcp_opt_len < 0 || tcp_data_len < 0))
		return (TCP_LRO_CANNOT);

	/* Compute TCP data only checksum. */
	if (tcp_data_len == 0)
		tcp_data_sum = 0;	/* no data, no checksum */
	else if (__predict_false(csum != 0))
		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~csum);
	else
		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~th->th_sum);

	/* Save TCP info in mbuf. */
	m->m_nextpkt = NULL;
	m->m_pkthdr.rcvif = lc->ifp;
	m->m_pkthdr.lro_tcp_d_csum = tcp_data_sum;
	m->m_pkthdr.lro_tcp_d_len = tcp_data_len;
	m->m_pkthdr.lro_tcp_h_off = ((uint8_t *)th - (uint8_t *)m->m_data);
	m->m_pkthdr.lro_nsegs = 1;

	/* Get hash bucket. */
	if (!use_hash) {
		bucket = &lc->lro_hash[0];
	} else {
		bucket = tcp_lro_rx_get_bucket(lc, m, pa);
	}

	/* Try to find a matching previous segment. */
	LIST_FOREACH(le, bucket, hash_next) {
		/* Compare addresses and ports. */
		if (lro_address_compare(&po.data, &le->outer.data) == false ||
		    lro_address_compare(&pi.data, &le->inner.data) == false)
			continue;

		/* Check if no data and old ACK. */
		if (tcp_data_len == 0 &&
		    SEQ_LT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
			m_freem(m);
			return (0);
		}

		/* Mark "m" in the last spot. */
		le->m_last_mbuf->m_nextpkt = m;
		/* Now set the tail to "m". */
		le->m_last_mbuf = m;
		return (0);
	}

	/* Try to find an empty slot. */
	if (LIST_EMPTY(&lc->lro_free))
		return (TCP_LRO_NO_ENTRIES);

	/* Start a new segment chain. */
	le = LIST_FIRST(&lc->lro_free);
	LIST_REMOVE(le, next);
	tcp_lro_active_insert(lc, bucket, le);

	/* Make sure the headers are set. */
	le->inner = pi;
	le->outer = po;

	/* Store time this entry was allocated. */
	le->alloc_time = lc->lro_last_queue_time;

	tcp_set_entry_to_mbuf(lc, le, m, th);

	/* Now set the tail to "m". */
	le->m_last_mbuf = m;

	return (0);
}
int
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{
	int error;

	if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
	    ((CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) ||
	    (m->m_pkthdr.csum_data != 0xffff)) {
		/*
		 * The checksum either did not have hardware offload
		 * or it was a bad checksum. We can't LRO such
		 * a packet.
		 */
		counter_u64_add(tcp_bad_csums, 1);
		return (TCP_LRO_CANNOT);
	}
	/* get current time */
	binuptime(&lc->lro_last_queue_time);
	CURVNET_SET(lc->ifp->if_vnet);
	error = tcp_lro_rx_common(lc, m, csum, true);
	if (__predict_false(error != 0)) {
		/*
		 * Flush anything we have accumulated
		 * ahead of this packet that can't
		 * be LRO'd. This preserves order.
		 */
		tcp_lro_flush_active(lc);
	}
	CURVNET_RESTORE();

	return (error);
}
void
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
{
	/* sanity checks */
	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
	    lc->lro_mbuf_max == 0)) {
		/* packet drop */
		m_freem(mb);
		return;
	}

	/* check if packet is not LRO capable */
	if (__predict_false((lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
		/* input packet to network layer */
		(*lc->ifp->if_input) (lc->ifp, mb);
		return;
	}

	/* If no hardware or arrival stamp on the packet add timestamp */
	if ((tcplro_stacks_wanting_mbufq > 0) &&
	    (tcp_less_accurate_lro_ts == 0) &&
	    ((mb->m_flags & M_TSTMP) == 0)) {
		/* Add in an LRO time since no hardware */
		binuptime(&lc->lro_last_queue_time);
		mb->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
		mb->m_flags |= M_TSTMP_LRO;
	}

	/* create sequence number */
	lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
	    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
	    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
	    ((uint64_t)lc->lro_mbuf_count);

	/* enter mbuf */
	lc->lro_mbuf_data[lc->lro_mbuf_count].mb = mb;

	/* flush if array is full */
	if (__predict_false(++lc->lro_mbuf_count == lc->lro_mbuf_max))
		tcp_lro_flush_all(lc);
}
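
/*
 * Layout of the 64-bit sort key built above, for reference:
 *
 *	bits 63..56	M_HASHTYPE_GET(mb)	hash type
 *	bits 55..24	m_pkthdr.flowid		RSS flow identifier
 *	bits 23..0	lro_mbuf_count		arrival index
 *
 * Sorting on this key groups mbufs belonging to the same stream
 * together while the low 24 bits preserve per-stream arrival order;
 * tcp_lro_flush_all() masks them away ("seq & (-1ULL << 24)") when
 * detecting stream boundaries.
 */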