/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007, Myricom Inc.
 * Copyright (c) 2008, Intel Corporation.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2016-2021 Mellanox Technologies.
 * All rights reserved.
 *
 * Portions of this software were developed by Bjoern Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockbuf.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/vnet.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/udp.h>
#include <netinet6/ip6_var.h>

#include <machine/in_cksum.h>

static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");

#define	TCP_LRO_TS_OPTION \
    ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
	  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)

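/*
 * Example (illustrative): with TCPOPT_NOP = 1, TCPOPT_TIMESTAMP = 8 and
 * TCPOLEN_TIMESTAMP = 10, this constant matches the first 32 bits of the
 * canonical 12-byte "timestamp only" option block in network order,
 * i.e. the bytes 0x01 0x01 0x08 0x0a (NOP, NOP, kind, length).
 */
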
static void	tcp_lro_rx_done(struct lro_ctrl *lc);
static int	tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m,
		    uint32_t csum, bool use_hash);

static bool	do_bpf_strip_and_compress(struct inpcb *, struct lro_ctrl *,
		    struct lro_entry *, struct mbuf **, struct mbuf **,
		    struct mbuf **, bool *, bool);

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP LRO");

static long tcplro_stacks_wanting_mbufq;
counter_u64_t tcp_inp_lro_direct_queue;
counter_u64_t tcp_inp_lro_wokeup_queue;
counter_u64_t tcp_inp_lro_compressed;
counter_u64_t tcp_inp_lro_locks_taken;
counter_u64_t tcp_extra_mbuf;
counter_u64_t tcp_would_have_but;
counter_u64_t tcp_comp_total;
counter_u64_t tcp_uncomp_total;
counter_u64_t tcp_bad_csums;

static unsigned	tcp_lro_entries = TCP_LRO_ENTRIES;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
    "default number of LRO entries");

static uint32_t tcp_lro_cpu_set_thresh = TCP_LRO_CPU_DECLARATION_THRESH;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, lro_cpu_threshold,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_cpu_set_thresh, 0,
    "Number of interrupts in a row on the same CPU that will make us declare an 'affinity' CPU");

SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, fullqueue, CTLFLAG_RD,
    &tcp_inp_lro_direct_queue, "Number of lro's fully queued to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, wokeup, CTLFLAG_RD,
    &tcp_inp_lro_wokeup_queue, "Number of lro's where we woke up transport via hpts");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, compressed, CTLFLAG_RD,
    &tcp_inp_lro_compressed, "Number of lro's compressed and sent to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lockcnt, CTLFLAG_RD,
    &tcp_inp_lro_locks_taken, "Number of lro's inp_wlocks taken");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, extra_mbuf, CTLFLAG_RD,
    &tcp_extra_mbuf, "Number of times we had an extra compressed ack dropped into the tp");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, would_have_but, CTLFLAG_RD,
    &tcp_would_have_but, "Number of times we would have had an extra compressed, but mget failed");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, with_m_ackcmp, CTLFLAG_RD,
    &tcp_comp_total, "Number of mbufs queued with M_ACKCMP flags set");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, without_m_ackcmp, CTLFLAG_RD,
    &tcp_uncomp_total, "Number of mbufs queued without M_ACKCMP");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lro_badcsum, CTLFLAG_RD,
    &tcp_bad_csums, "Number of packets that the common code saw with bad csums");

void
tcp_lro_reg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, 1);
}

void
tcp_lro_dereg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, -1);
}

static void
tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
    struct lro_entry *le)
{
	LIST_INSERT_HEAD(&lc->lro_active, le, next);
	LIST_INSERT_HEAD(bucket, le, hash_next);
}

static void
tcp_lro_active_remove(struct lro_entry *le)
{
	LIST_REMOVE(le, next);		/* active list */
	LIST_REMOVE(le, hash_next);	/* hash bucket */
}

int
tcp_lro_init(struct lro_ctrl *lc)
{
	return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
}

int
tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
    unsigned lro_entries, unsigned lro_mbufs)
{
	struct lro_entry *le;
	size_t size;
	unsigned i, elements;

	lc->lro_bad_csum = 0;
	lc->lro_queued = 0;
	lc->lro_flushed = 0;
	lc->lro_mbuf_count = 0;
	lc->lro_mbuf_max = lro_mbufs;
	lc->lro_cnt = lro_entries;
	lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
	lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
	lc->ifp = ifp;
	LIST_INIT(&lc->lro_free);
	LIST_INIT(&lc->lro_active);

	/* create hash table to accelerate entry lookup */
	if (lro_entries > lro_mbufs)
		elements = lro_entries;
	else
		elements = lro_mbufs;
	lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
	    HASH_NOWAIT);
	if (lc->lro_hash == NULL) {
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}

	/* compute size to allocate */
	size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
	    (lro_entries * sizeof(*le));
	lc->lro_mbuf_data = (struct lro_mbuf_sort *)
	    malloc(size, M_LRO, M_NOWAIT | M_ZERO);

	/* check for out of memory */
	if (lc->lro_mbuf_data == NULL) {
		free(lc->lro_hash, M_LRO);
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}
	/* compute offset for LRO entries */
	le = (struct lro_entry *)
	    (lc->lro_mbuf_data + lro_mbufs);

	/* setup linked list */
	for (i = 0; i != lro_entries; i++)
		LIST_INSERT_HEAD(&lc->lro_free, le + i, next);

	return (0);
}

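/*
 * Typical driver usage (illustrative sketch only; "sc", its fields and
 * drv_rx_dequeue() are hypothetical driver state, not part of this API):
 *
 *	struct lro_ctrl *lro = &sc->lro;
 *	struct mbuf *m;
 *
 *	tcp_lro_init(lro);
 *	lro->ifp = sc->sc_ifp;
 *	...
 *	while ((m = drv_rx_dequeue(sc)) != NULL) {
 *		if (tcp_lro_rx(lro, m, 0) != 0)
 *			(*sc->sc_ifp->if_input)(sc->sc_ifp, m);
 *	}
 *	tcp_lro_flush_all(lro);
 */
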
struct vxlan_header {
	uint32_t	vxlh_flags;
	uint32_t	vxlh_vni;
};

static inline void *
tcp_lro_low_level_parser(void *ptr, struct lro_parser *parser, bool update_data, bool is_vxlan, int mlen)
{
	const struct ether_vlan_header *eh;
	void *old;
	uint16_t eth_type;

	if (update_data)
		memset(parser, 0, sizeof(*parser));

	old = ptr;

	if (is_vxlan) {
		const struct vxlan_header *vxh;
		vxh = ptr;
		ptr = (uint8_t *)ptr + sizeof(*vxh);
		mlen -= sizeof(*vxh);
		if (update_data) {
			parser->data.vxlan_vni =
			    vxh->vxlh_vni & htonl(0xffffff00);
		}
	}

	eh = ptr;
	if (__predict_false(eh->evl_encap_proto == htons(ETHERTYPE_VLAN))) {
		eth_type = eh->evl_proto;
		if (update_data) {
			/* strip priority and keep VLAN ID only */
			parser->data.vlan_id = eh->evl_tag & htons(EVL_VLID_MASK);
		}
		/* advance to next header */
		ptr = (uint8_t *)ptr + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		mlen -= (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	} else {
		eth_type = eh->evl_encap_proto;
		/* advance to next header */
		mlen -= ETHER_HDR_LEN;
		ptr = (uint8_t *)ptr + ETHER_HDR_LEN;
	}
	if (__predict_false(mlen <= 0))
		return (NULL);
	switch (eth_type) {
#ifdef INET
	case htons(ETHERTYPE_IP):
		parser->ip4 = ptr;
		if (__predict_false(mlen < sizeof(struct ip)))
			return (NULL);
		/* Ensure there are no IPv4 options. */
		if ((parser->ip4->ip_hl << 2) != sizeof (*parser->ip4))
			break;
		/* .. and the packet is not fragmented. */
		if (parser->ip4->ip_off & htons(IP_MF|IP_OFFMASK))
			break;
		ptr = (uint8_t *)ptr + (parser->ip4->ip_hl << 2);
		mlen -= sizeof(struct ip);
		if (update_data) {
			parser->data.s_addr.v4 = parser->ip4->ip_src;
			parser->data.d_addr.v4 = parser->ip4->ip_dst;
		}
		switch (parser->ip4->ip_p) {
		case IPPROTO_UDP:
			if (__predict_false(mlen < sizeof(struct udphdr)))
				return (NULL);
			parser->udp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV4_UDP;
				parser->data.s_port = parser->udp->uh_sport;
				parser->data.d_port = parser->udp->uh_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_UDP);
			}
			ptr = ((uint8_t *)ptr + sizeof(*parser->udp));
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		case IPPROTO_TCP:
			parser->tcp = ptr;
			if (__predict_false(mlen < sizeof(struct tcphdr)))
				return (NULL);
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV4_TCP;
				parser->data.s_port = parser->tcp->th_sport;
				parser->data.d_port = parser->tcp->th_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_TCP);
			}
			if (__predict_false(mlen < (parser->tcp->th_off << 2)))
				return (NULL);
			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		default:
			break;
		}
		break;
#endif
#ifdef INET6
	case htons(ETHERTYPE_IPV6):
		parser->ip6 = ptr;
		if (__predict_false(mlen < sizeof(struct ip6_hdr)))
			return (NULL);
		ptr = (uint8_t *)ptr + sizeof(*parser->ip6);
		if (update_data) {
			parser->data.s_addr.v6 = parser->ip6->ip6_src;
			parser->data.d_addr.v6 = parser->ip6->ip6_dst;
		}
		mlen -= sizeof(struct ip6_hdr);
		switch (parser->ip6->ip6_nxt) {
		case IPPROTO_UDP:
			if (__predict_false(mlen < sizeof(struct udphdr)))
				return (NULL);
			parser->udp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV6_UDP;
				parser->data.s_port = parser->udp->uh_sport;
				parser->data.d_port = parser->udp->uh_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_UDP);
			}
			ptr = (uint8_t *)ptr + sizeof(*parser->udp);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		case IPPROTO_TCP:
			parser->tcp = ptr;
			if (__predict_false(mlen < sizeof(struct tcphdr)))
				return (NULL);
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV6_TCP;
				parser->data.s_port = parser->tcp->th_sport;
				parser->data.d_port = parser->tcp->th_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_TCP);
			}
			if (__predict_false(mlen < (parser->tcp->th_off << 2)))
				return (NULL);
			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		default:
			break;
		}
		break;
#endif
	default:
		break;
	}
	/* Invalid packet - cannot parse */
	return (NULL);
}

static const int vxlan_csum = CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID;

static inline struct lro_parser *
tcp_lro_parser(struct mbuf *m, struct lro_parser *po, struct lro_parser *pi, bool update_data)
{
	void *data_ptr;

	/* Try to parse outer headers first. */
	data_ptr = tcp_lro_low_level_parser(m->m_data, po, update_data, false, m->m_len);
	if (data_ptr == NULL || po->total_hdr_len > m->m_len)
		return (NULL);

	if (update_data) {
		/* Store VLAN ID, if any. */
		if (__predict_false(m->m_flags & M_VLANTAG)) {
			po->data.vlan_id =
			    htons(m->m_pkthdr.ether_vtag) & htons(EVL_VLID_MASK);
		}
		/* Store decrypted flag, if any. */
		if (__predict_false(m->m_flags & M_DECRYPTED))
			po->data.lro_flags |= LRO_FLAG_DECRYPTED;
	}

	switch (po->data.lro_type) {
	case LRO_TYPE_IPV4_UDP:
	case LRO_TYPE_IPV6_UDP:
		/* Check for VXLAN headers. */
		if ((m->m_pkthdr.csum_flags & vxlan_csum) != vxlan_csum)
			break;

		/* Try to parse inner headers. */
		data_ptr = tcp_lro_low_level_parser(data_ptr, pi, update_data, true,
		    (m->m_len - ((caddr_t)data_ptr - m->m_data)));
		if (data_ptr == NULL || (pi->total_hdr_len + po->total_hdr_len) > m->m_len)
			break;

		/* Verify supported header types. */
		switch (pi->data.lro_type) {
		case LRO_TYPE_IPV4_TCP:
		case LRO_TYPE_IPV6_TCP:
			return (pi);
		default:
			break;
		}
		break;
	case LRO_TYPE_IPV4_TCP:
	case LRO_TYPE_IPV6_TCP:
		if (update_data)
			memset(pi, 0, sizeof(*pi));
		return (po);
	default:
		break;
	}
	return (NULL);
}

static inline int
tcp_lro_trim_mbuf_chain(struct mbuf *m, const struct lro_parser *po)
{
	int len;

	switch (po->data.lro_type) {
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		len = ((uint8_t *)po->ip4 - (uint8_t *)m->m_data) +
		    ntohs(po->ip4->ip_len);
		break;
#endif
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		len = ((uint8_t *)po->ip6 - (uint8_t *)m->m_data) +
		    ntohs(po->ip6->ip6_plen) + sizeof(*po->ip6);
		break;
#endif
	default:
		return (TCP_LRO_CANNOT);
	}

	/*
	 * If the frame is padded beyond the end of the IP packet,
	 * then trim the extra bytes off:
	 */
	if (__predict_true(m->m_pkthdr.len == len)) {
		return (0);
	} else if (m->m_pkthdr.len > len) {
		m_adj(m, len - m->m_pkthdr.len);
		return (0);
	}
	return (TCP_LRO_CANNOT);
}

static struct tcphdr *
tcp_lro_get_th(struct mbuf *m)
{
	return ((struct tcphdr *)((uint8_t *)m->m_data + m->m_pkthdr.lro_tcp_h_off));
}

static void
lro_free_mbuf_chain(struct mbuf *m)
{
	struct mbuf *save;

	while (m) {
		save = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
		m = save;
	}
}

void
tcp_lro_free(struct lro_ctrl *lc)
{
	struct lro_entry *le;
	unsigned x;

	/* reset LRO free list */
	LIST_INIT(&lc->lro_free);

	/* free active mbufs, if any */
	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		lro_free_mbuf_chain(le->m_head);
	}

	/* free hash table */
	free(lc->lro_hash, M_LRO);
	lc->lro_hash = NULL;
	lc->lro_hashsz = 0;

	/* free mbuf array, if any */
	for (x = 0; x != lc->lro_mbuf_count; x++)
		m_freem(lc->lro_mbuf_data[x].mb);
	lc->lro_mbuf_count = 0;

	/* free allocated memory, if any */
	free(lc->lro_mbuf_data, M_LRO);
	lc->lro_mbuf_data = NULL;
}

static uint16_t
tcp_lro_rx_csum_tcphdr(const struct tcphdr *th)
{
	uint32_t csum;
	const uint16_t *ptr;
	int x;

	csum = -th->th_sum;	/* exclude checksum field */
	x = th->th_off;
	ptr = (const uint16_t *)th;
	while (x--) {
		csum += *ptr;
		ptr++;
		csum += *ptr;
		ptr++;
	}
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	return (csum);
}

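/*
 * Worked example of the carry fold above (illustrative): for
 * csum = 0x2f0f3 the first pass yields 0x2 + 0xf0f3 = 0xf0f5, which
 * already fits in 16 bits, so the loop exits with the folded one's
 * complement sum.
 */
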
static uint16_t
tcp_lro_rx_csum_data(const struct lro_parser *pa, uint16_t tcp_csum)
{
	uint32_t c;
	uint16_t cs;

	c = tcp_csum;

	switch (pa->data.lro_type) {
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		/* Compute full pseudo IPv6 header checksum. */
		cs = in6_cksum_pseudo(pa->ip6, ntohs(pa->ip6->ip6_plen), pa->ip6->ip6_nxt, 0);
		break;
#endif
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		/* Compute full pseudo IPv4 header checksum. */
		cs = in_addword(ntohs(pa->ip4->ip_len) - sizeof(*pa->ip4), IPPROTO_TCP);
		cs = in_pseudo(pa->ip4->ip_src.s_addr, pa->ip4->ip_dst.s_addr, htons(cs));
		break;
#endif
	default:
		cs = 0;		/* Keep compiler happy. */
		break;
	}

	/* Complement checksum. */
	cs = ~cs;
	c += cs;

	/* Remove TCP header checksum. */
	cs = ~tcp_lro_rx_csum_tcphdr(pa->tcp);
	c += cs;

	/* Compute checksum remainder. */
	while (c > 0xffff)
		c = (c >> 16) + (c & 0xffff);

	return (c);
}

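/*
 * In one's complement arithmetic the two "c += ~x" steps above behave
 * like subtraction, so what is returned is (illustrative identity):
 *
 *	data_sum = tcp_csum - pseudo_header_sum - tcp_header_sum
 *
 * i.e. the checksum over the TCP payload bytes alone.
 */
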
static void
tcp_lro_rx_done(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		tcp_lro_flush(lc, le);
	}
}

void
tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
{
	struct lro_entry *le, *le_tmp;
	uint64_t now, tov;
	struct bintime bt;

	NET_EPOCH_ASSERT();
	if (LIST_EMPTY(&lc->lro_active))
		return;

	/* get timeout time and current time in ns */
	binuptime(&bt);
	now = bintime2ns(&bt);
	tov = ((timeout->tv_sec * 1000000000) + (timeout->tv_usec * 1000));
	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
		if (now >= (bintime2ns(&le->alloc_time) + tov)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}

#ifdef INET
static int
tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4)
{
	uint16_t csum;

	/* Legacy IP has a header checksum that needs to be correct. */
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		if (__predict_false((m->m_pkthdr.csum_flags & CSUM_IP_VALID) == 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	} else {
		csum = in_cksum_hdr(ip4);
		if (__predict_false(csum != 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	}
	return (0);
}
#endif

static void
tcp_lro_log(struct tcpcb *tp, const struct lro_ctrl *lc,
    const struct lro_entry *le, const struct mbuf *m,
    int frm, int32_t tcp_data_len, uint32_t th_seq,
    uint32_t th_ack, uint16_t th_win)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv, btv;
		uint32_t cts;

		cts = tcp_get_usecs(&tv);
		memset(&log, 0, sizeof(union tcp_log_stackspecific));
		log.u_bbr.flex8 = frm;
		log.u_bbr.flex1 = tcp_data_len;
		if (m != NULL)
			log.u_bbr.flex2 = m->m_pkthdr.len;
		else
			log.u_bbr.flex2 = 0;
		if (le->m_head != NULL) {
			log.u_bbr.flex3 = le->m_head->m_pkthdr.lro_nsegs;
			log.u_bbr.flex4 = le->m_head->m_pkthdr.lro_tcp_d_len;
			log.u_bbr.flex5 = le->m_head->m_pkthdr.len;
			log.u_bbr.delRate = le->m_head->m_flags;
			log.u_bbr.rttProp = le->m_head->m_pkthdr.rcv_tstmp;
		}
		log.u_bbr.inflight = th_seq;
		log.u_bbr.delivered = th_ack;
		log.u_bbr.timeStamp = cts;
		log.u_bbr.epoch = le->next_seq;
		log.u_bbr.lt_epoch = le->ack_seq;
		log.u_bbr.pacing_gain = th_win;
		log.u_bbr.cwnd_gain = le->window;
		log.u_bbr.lost = curcpu;
		log.u_bbr.cur_del_rate = (uintptr_t)m;
		log.u_bbr.bw_inuse = (uintptr_t)le->m_head;
		bintime2timeval(&lc->lro_last_queue_time, &btv);
		log.u_bbr.flex6 = tcp_tv_to_usectick(&btv);
		log.u_bbr.flex7 = le->compressed;
		log.u_bbr.pacing_gain = le->uncompressed;	/* XXX overwrites th_win above */
		if (in_epoch(net_epoch_preempt))
			log.u_bbr.inhpts = 1;
		else
			log.u_bbr.inhpts = 0;
		TCP_LOG_EVENTP(tp, NULL,
		    &tp->t_inpcb->inp_socket->so_rcv,
		    &tp->t_inpcb->inp_socket->so_snd,
		    TCP_LOG_LRO, 0,
		    0, &log, false, &tv);
	}
}

static void
tcp_lro_assign_and_checksum_16(uint16_t *ptr, uint16_t value, uint16_t *psum)
{
	uint32_t csum;

	csum = 0xffff - *ptr + value;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);
	*ptr = value;
	*psum = csum;
}

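/*
 * This is the standard incremental-update rule (compare RFC 1624):
 * replacing a 16-bit field m with m' changes the sum by ~m + m'.
 * Example (illustrative): replacing 0x0040 with 0x0050 gives
 * 0xffff - 0x0040 + 0x0050 = 0x1000f, which folds to 0x0010.
 */
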
static uint16_t
tcp_lro_update_checksum(const struct lro_parser *pa, const struct lro_entry *le,
    uint16_t payload_len, uint16_t delta_sum)
{
	uint32_t csum;
	uint16_t tlen;
	uint16_t temp[5] = {};

	switch (pa->data.lro_type) {
	case LRO_TYPE_IPV4_TCP:
		/* Compute new IPv4 length. */
		tlen = (pa->ip4->ip_hl << 2) + (pa->tcp->th_off << 2) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);

		/* Subtract delta from current IPv4 checksum. */
		csum = pa->ip4->ip_sum + 0xffff - temp[0];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
		goto update_tcp_header;

	case LRO_TYPE_IPV6_TCP:
		/* Compute new IPv6 length. */
		tlen = (pa->tcp->th_off << 2) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
		goto update_tcp_header;

	case LRO_TYPE_IPV4_UDP:
		/* Compute new IPv4 length. */
		tlen = (pa->ip4->ip_hl << 2) + sizeof(*pa->udp) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);

		/* Subtract delta from current IPv4 checksum. */
		csum = pa->ip4->ip_sum + 0xffff - temp[0];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
		goto update_udp_header;

	case LRO_TYPE_IPV6_UDP:
		/* Compute new IPv6 length. */
		tlen = sizeof(*pa->udp) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
		goto update_udp_header;

	default:
		return (0);
	}

update_tcp_header:
	/* Compute current TCP header checksum. */
	temp[2] = tcp_lro_rx_csum_tcphdr(pa->tcp);

	/* Incorporate the latest ACK into the TCP header. */
	pa->tcp->th_ack = le->ack_seq;
	pa->tcp->th_win = le->window;

	/* Incorporate latest timestamp into the TCP header. */
	if (le->timestamp != 0) {
		uint32_t *ts_ptr;

		ts_ptr = (uint32_t *)(pa->tcp + 1);
		ts_ptr[1] = htonl(le->tsval);
		ts_ptr[2] = le->tsecr;
	}

	/* Compute new TCP header checksum. */
	temp[3] = tcp_lro_rx_csum_tcphdr(pa->tcp);

	/* Compute new TCP checksum. */
	csum = pa->tcp->th_sum + 0xffff - delta_sum +
	    0xffff - temp[0] + 0xffff - temp[3] + temp[2];
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Assign new TCP checksum. */
	tcp_lro_assign_and_checksum_16(&pa->tcp->th_sum, csum, &temp[4]);

	/* Compute all modifications affecting next checksum. */
	csum = temp[0] + temp[1] + 0xffff - temp[2] +
	    temp[3] + temp[4] + delta_sum;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Return delta checksum to next stage, if any. */
	return (csum);

update_udp_header:
	tlen = sizeof(*pa->udp) + payload_len;
	/* Assign new UDP length and compute checksum delta. */
	tcp_lro_assign_and_checksum_16(&pa->udp->uh_ulen, htons(tlen), &temp[2]);

	/* Check if there is a UDP checksum. */
	if (__predict_false(pa->udp->uh_sum != 0)) {
		/* Compute new UDP checksum. */
		csum = pa->udp->uh_sum + 0xffff - delta_sum +
		    0xffff - temp[0] + 0xffff - temp[2];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		/* Assign new UDP checksum. */
		tcp_lro_assign_and_checksum_16(&pa->udp->uh_sum, csum, &temp[3]);
	}

	/* Compute all modifications affecting next checksum. */
	csum = temp[0] + temp[1] + temp[2] + temp[3] + delta_sum;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Return delta checksum to next stage, if any. */
	return (csum);
}

static void
tcp_flush_out_entry(struct lro_ctrl *lc, struct lro_entry *le)
{
	/* Check if we need to recompute any checksums. */
	if (le->m_head->m_pkthdr.lro_nsegs > 1) {
		uint16_t csum;

		switch (le->inner.data.lro_type) {
		case LRO_TYPE_IPV4_TCP:
			csum = tcp_lro_update_checksum(&le->inner, le,
			    le->m_head->m_pkthdr.lro_tcp_d_len,
			    le->m_head->m_pkthdr.lro_tcp_d_csum);
			csum = tcp_lro_update_checksum(&le->outer, NULL,
			    le->m_head->m_pkthdr.lro_tcp_d_len +
			    le->inner.total_hdr_len, csum);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			le->m_head->m_pkthdr.csum_data = 0xffff;
			break;
		case LRO_TYPE_IPV6_TCP:
			csum = tcp_lro_update_checksum(&le->inner, le,
			    le->m_head->m_pkthdr.lro_tcp_d_len,
			    le->m_head->m_pkthdr.lro_tcp_d_csum);
			csum = tcp_lro_update_checksum(&le->outer, NULL,
			    le->m_head->m_pkthdr.lro_tcp_d_len +
			    le->inner.total_hdr_len, csum);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			le->m_head->m_pkthdr.csum_data = 0xffff;
			break;
		case LRO_TYPE_NONE:
			switch (le->outer.data.lro_type) {
			case LRO_TYPE_IPV4_TCP:
				csum = tcp_lro_update_checksum(&le->outer, le,
				    le->m_head->m_pkthdr.lro_tcp_d_len,
				    le->m_head->m_pkthdr.lro_tcp_d_csum);
				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
				le->m_head->m_pkthdr.csum_data = 0xffff;
				break;
			case LRO_TYPE_IPV6_TCP:
				csum = tcp_lro_update_checksum(&le->outer, le,
				    le->m_head->m_pkthdr.lro_tcp_d_len,
				    le->m_head->m_pkthdr.lro_tcp_d_csum);
				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				le->m_head->m_pkthdr.csum_data = 0xffff;
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Break any mbuf chain: in the singleton case m_nextpkt may
	 * still point at m_head; in all other cases
	 * tcp_push_and_replace() has already set m_nextpkt to NULL.
	 */
	le->m_head->m_nextpkt = NULL;
	lc->lro_queued += le->m_head->m_pkthdr.lro_nsegs;
	(*lc->ifp->if_input)(lc->ifp, le->m_head);
}

static void
tcp_set_entry_to_mbuf(struct lro_ctrl *lc, struct lro_entry *le,
    struct mbuf *m, struct tcphdr *th)
{
	uint32_t *ts_ptr;
	uint16_t tcp_data_len;
	uint16_t tcp_opt_len;

	ts_ptr = (uint32_t *)(th + 1);
	tcp_opt_len = (th->th_off << 2);
	tcp_opt_len -= sizeof(*th);

	/* Check if there is a timestamp option. */
	if (tcp_opt_len == 0 ||
	    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
	    *ts_ptr != TCP_LRO_TS_OPTION)) {
		/* We failed to find the timestamp option. */
		le->timestamp = 0;
	} else {
		le->timestamp = 1;
		le->tsval = ntohl(*(ts_ptr + 1));
		le->tsecr = *(ts_ptr + 2);
	}

	tcp_data_len = m->m_pkthdr.lro_tcp_d_len;

	/* Pull out TCP sequence numbers and window size. */
	le->next_seq = ntohl(th->th_seq) + tcp_data_len;
	le->ack_seq = th->th_ack;
	le->window = th->th_win;

	/* Setup new data pointers. */
	le->m_head = m;
	le->m_tail = m_last(m);
}

static void
tcp_push_and_replace(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m)
{
	struct lro_parser *pa;
	struct mbuf *msave;

	/*
	 * Push up the stack of the current entry
	 * and replace it with "m".
	 */

	/* Grab off the next and save it */
	msave = le->m_head->m_nextpkt;
	le->m_head->m_nextpkt = NULL;

	/* Now push out the old entry */
	tcp_flush_out_entry(lc, le);

	/* Re-parse new header, should not fail. */
	pa = tcp_lro_parser(m, &le->outer, &le->inner, false);
	KASSERT(pa != NULL,
	    ("tcp_push_and_replace: LRO parser failed on m=%p\n", m));

	/*
	 * Now to replace the data properly in the entry
	 * we have to reset the TCP header and
	 * other fields.
	 */
	tcp_set_entry_to_mbuf(lc, le, m, pa->tcp);

	/* Restore the next list */
	m->m_nextpkt = msave;
}

static void
tcp_lro_mbuf_append_pkthdr(struct mbuf *m, const struct mbuf *p)
{
	uint32_t csum;

	if (m->m_pkthdr.lro_nsegs == 1) {
		/* Compute relative checksum. */
		csum = p->m_pkthdr.lro_tcp_d_csum;
	} else {
		/* Merge TCP data checksums. */
		csum = (uint32_t)m->m_pkthdr.lro_tcp_d_csum +
		    (uint32_t)p->m_pkthdr.lro_tcp_d_csum;
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
	}

	/* Update various counters. */
	m->m_pkthdr.len += p->m_pkthdr.lro_tcp_d_len;
	m->m_pkthdr.lro_tcp_d_csum = csum;
	m->m_pkthdr.lro_tcp_d_len += p->m_pkthdr.lro_tcp_d_len;
	m->m_pkthdr.lro_nsegs += p->m_pkthdr.lro_nsegs;
}

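/*
 * Example of the merge above (illustrative): 0xffff + 0x0001 folds as
 * 0x0001 + 0x0000 = 0x0001, i.e. the carry wraps back into the low 16
 * bits, so per-segment TCP data checksums can be summed in any order.
 */
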
static void
tcp_lro_condense(struct lro_ctrl *lc, struct lro_entry *le)
{
	/*
	 * Walk through the mbuf chain we
	 * have on tap and compress/condense
	 * as required.
	 */
	uint32_t *ts_ptr;
	struct mbuf *m;
	struct tcphdr *th;
	uint32_t tcp_data_len_total;
	uint32_t tcp_data_seg_total;
	uint16_t tcp_data_len;
	uint16_t tcp_opt_len;

	/*
	 * First we must check the lead (m_head)
	 * we must make sure that it is *not*
	 * something that should be sent up
	 * right away (sack etc).
	 */
again:
	m = le->m_head->m_nextpkt;
	if (m == NULL) {
		/* Just one left. */
		return;
	}

	th = tcp_lro_get_th(m);
	tcp_opt_len = (th->th_off << 2);
	tcp_opt_len -= sizeof(*th);
	ts_ptr = (uint32_t *)(th + 1);

	if (tcp_opt_len != 0 && __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
	    *ts_ptr != TCP_LRO_TS_OPTION)) {
		/*
		 * It's not the timestamp. We can't
		 * use this guy as the head.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(lc, le, m);
		goto again;
	}
	if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment, e.g. FIN.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(lc, le, m);
		goto again;
	}
	while ((m = le->m_head->m_nextpkt) != NULL) {
		/*
		 * condense m into le, first
		 * pull m out of the list.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		m->m_nextpkt = NULL;
		/* Setup my data */
		tcp_data_len = m->m_pkthdr.lro_tcp_d_len;
		th = tcp_lro_get_th(m);
		ts_ptr = (uint32_t *)(th + 1);
		tcp_opt_len = (th->th_off << 2);
		tcp_opt_len -= sizeof(*th);
		tcp_data_len_total = le->m_head->m_pkthdr.lro_tcp_d_len + tcp_data_len;
		tcp_data_seg_total = le->m_head->m_pkthdr.lro_nsegs + m->m_pkthdr.lro_nsegs;

		if (tcp_data_seg_total >= lc->lro_ackcnt_lim ||
		    tcp_data_len_total >= lc->lro_length_lim) {
			/* Flush now if appending will result in overflow. */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_opt_len != 0 &&
		    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
		    *ts_ptr != TCP_LRO_TS_OPTION)) {
			/*
			 * Maybe a sack in the new one? We need to
			 * start all over after flushing the
			 * current le. We will go up to the beginning
			 * and flush it (calling the replace again possibly
			 * or just returning).
			 */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_opt_len != 0) {
			uint32_t tsval = ntohl(*(ts_ptr + 1));
			/* Make sure timestamp values are increasing. */
			if (TSTMP_GT(le->tsval, tsval)) {
				tcp_push_and_replace(lc, le, m);
				goto again;
			}
			le->tsval = tsval;
			le->tsecr = *(ts_ptr + 2);
		}
		/* Try to append the new segment. */
		if (__predict_false(ntohl(th->th_seq) != le->next_seq ||
		    (tcp_data_len == 0 &&
		    le->ack_seq == th->th_ack &&
		    le->window == th->th_win))) {
			/* Out of order packet or duplicate ACK. */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_data_len != 0 ||
		    SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
			le->next_seq += tcp_data_len;
			le->ack_seq = th->th_ack;
			le->window = th->th_win;
		} else if (th->th_ack == le->ack_seq) {
			le->window = WIN_MAX(le->window, th->th_win);
		}

		if (tcp_data_len == 0) {
			m_freem(m);
			continue;
		}

		/* Merge TCP data checksum and length to head mbuf. */
		tcp_lro_mbuf_append_pkthdr(le->m_head, m);

		/*
		 * Adjust the mbuf so that m_data points to the first byte of
		 * the ULP payload.  Adjust the mbuf to avoid complications and
		 * append new segment to existing mbuf chain.
		 */
		m_adj(m, m->m_pkthdr.len - tcp_data_len);
		m_demote_pkthdr(m);
		le->m_tail->m_next = m;
		le->m_tail = m_last(m);
	}
}

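/*
 * Example of a successful append (illustrative): two in-order segments
 * with th_seq 1000 and 2460 carrying 1460 payload bytes each condense
 * into a single chain with next_seq 3920, one set of headers and
 * lro_nsegs == 2.
 */
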
static void
tcp_queue_pkts(struct inpcb *inp, struct tcpcb *tp, struct lro_entry *le)
{
	INP_WLOCK_ASSERT(inp);
	if (tp->t_in_pkt == NULL) {
		/* Nothing yet there */
		tp->t_in_pkt = le->m_head;
		tp->t_tail_pkt = le->m_last_mbuf;
	} else {
		/* Already some there */
		tp->t_tail_pkt->m_nextpkt = le->m_head;
		tp->t_tail_pkt = le->m_last_mbuf;
	}
	le->m_head = NULL;
	le->m_last_mbuf = NULL;
}

static struct mbuf *
tcp_lro_get_last_if_ackcmp(struct lro_ctrl *lc, struct lro_entry *le,
    struct inpcb *inp, int32_t *new_m)
{
	struct tcpcb *tp;
	struct mbuf *m;

	tp = intotcpcb(inp);
	if (__predict_false(tp == NULL))
		return (NULL);

	/* Look at the last mbuf if any in queue */
	m = tp->t_tail_pkt;
	if (m != NULL && (m->m_flags & M_ACKCMP) != 0) {
		if (M_TRAILINGSPACE(m) >= sizeof(struct tcp_ackent)) {
			tcp_lro_log(tp, lc, le, NULL, 23, 0, 0, 0, 0);
			*new_m = 0;
			counter_u64_add(tcp_extra_mbuf, 1);
			return (m);
		} else {
			/* Mark we ran out of space */
			inp->inp_flags2 |= INP_MBUF_L_ACKS;
		}
	}
	/* Decide mbuf size. */
	if (inp->inp_flags2 & INP_MBUF_L_ACKS)
		m = m_getcl(M_NOWAIT, MT_DATA, M_ACKCMP | M_PKTHDR);
	else
		m = m_gethdr(M_NOWAIT, MT_DATA);

	if (__predict_false(m == NULL)) {
		counter_u64_add(tcp_would_have_but, 1);
		return (NULL);
	}
	*new_m = 1;
	counter_u64_add(tcp_comp_total, 1);
	m->m_flags |= M_ACKCMP;
	return (m);
}

static struct inpcb *
tcp_lro_lookup(struct ifnet *ifp, struct lro_parser *pa)
{
	struct inpcb *inp;

	switch (pa->data.lro_type) {
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		inp = in6_pcblookup(&V_tcbinfo,
		    &pa->data.s_addr.v6,
		    pa->data.s_port,
		    &pa->data.d_addr.v6,
		    pa->data.d_port,
		    INPLOOKUP_WLOCKPCB,
		    ifp);
		break;
#endif
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		inp = in_pcblookup(&V_tcbinfo,
		    pa->data.s_addr.v4,
		    pa->data.s_port,
		    pa->data.d_addr.v4,
		    pa->data.d_port,
		    INPLOOKUP_WLOCKPCB,
		    ifp);
		break;
#endif
	default:
		inp = NULL;
		break;
	}
	return (inp);
}

static inline bool
tcp_lro_ack_valid(struct mbuf *m, struct tcphdr *th, uint32_t **ppts, bool *other_opts)
{
	/*
	 * This function returns two bits of valuable information.
	 * a) Is what is present capable of being ack-compressed,
	 *    we can ack-compress if there is no options or just
	 *    a timestamp option, and of course the th_flags must
	 *    be correct as well.
	 * b) Our other options present such as SACK. This is
	 *    used to determine if we want to wakeup or not.
	 */
	bool ret = true;

	switch (th->th_off << 2) {
	case (sizeof(*th) + TCPOLEN_TSTAMP_APPA):
		*ppts = (uint32_t *)(th + 1);
		/* Check if we have only one timestamp option. */
		if (**ppts == TCP_LRO_TS_OPTION)
			*other_opts = false;
		else {
			*other_opts = true;
			ret = false;
		}
		break;
	case (sizeof(*th)):
		/* No options. */
		*ppts = NULL;
		*other_opts = false;
		break;
	default:
		*ppts = NULL;
		*other_opts = true;
		ret = false;
		break;
	}
	/* For ACKCMP we only accept ACK, PUSH, ECE and CWR. */
	if ((th->th_flags & ~(TH_ACK | TH_PUSH | TH_ECE | TH_CWR)) != 0)
		ret = false;
	/* If it has data on it we cannot compress it */
	if (m->m_pkthdr.lro_tcp_d_len)
		ret = false;

	/* ACK flag must be set. */
	if (!(th->th_flags & TH_ACK))
		ret = false;
	return (ret);
}

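/*
 * Example (illustrative): a pure ACK with th_off == 8 (a 32-byte header
 * whose 12 option bytes match TCP_LRO_TS_OPTION) and no payload is
 * compressible; the same ACK carrying a SACK block falls into the
 * default case above, sets *other_opts and is not.
 */
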
static int
tcp_lro_flush_tcphpts(struct lro_ctrl *lc, struct lro_entry *le)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	struct mbuf **pp, *cmp, *mv_to;
	bool bpf_req, should_wake;

	/* Check if the packet doesn't belong to our network interface. */
	if ((tcplro_stacks_wanting_mbufq == 0) ||
	    (le->outer.data.vlan_id != 0) ||
	    (le->inner.data.lro_type != LRO_TYPE_NONE))
		return (TCP_LRO_CANNOT);

	/*
	 * Be proactive about unspecified IPv6 address in source. As
	 * we use all-zero to indicate unbounded/unconnected pcb,
	 * unspecified IPv6 address can be used to confuse us.
	 *
	 * Note that packets with unspecified IPv6 destination are
	 * already dropped in ip6_input.
	 */
	if (__predict_false(le->outer.data.lro_type == LRO_TYPE_IPV6_TCP &&
	    IN6_IS_ADDR_UNSPECIFIED(&le->outer.data.s_addr.v6)))
		return (TCP_LRO_CANNOT);

	if (__predict_false(le->inner.data.lro_type == LRO_TYPE_IPV6_TCP &&
	    IN6_IS_ADDR_UNSPECIFIED(&le->inner.data.s_addr.v6)))
		return (TCP_LRO_CANNOT);

	/* Lookup inp, if any. */
	inp = tcp_lro_lookup(lc->ifp,
	    (le->inner.data.lro_type == LRO_TYPE_NONE) ? &le->outer : &le->inner);
	if (inp == NULL)
		return (TCP_LRO_CANNOT);

	counter_u64_add(tcp_inp_lro_locks_taken, 1);

	/* Get TCP control structure. */
	tp = intotcpcb(inp);

	/* Check if the inp is dead, Jim. */
	if (tp == NULL ||
	    (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) ||
	    (inp->inp_flags2 & INP_FREED)) {
		INP_WUNLOCK(inp);
		return (TCP_LRO_CANNOT);
	}
	if ((inp->inp_irq_cpu_set == 0) && (lc->lro_cpu_is_set == 1)) {
		inp->inp_irq_cpu = lc->lro_last_cpu;
		inp->inp_irq_cpu_set = 1;
	}
	/* Check if the transport doesn't support the needed optimizations. */
	if ((inp->inp_flags2 & (INP_SUPPORTS_MBUFQ | INP_MBUF_ACKCMP)) == 0) {
		INP_WUNLOCK(inp);
		return (TCP_LRO_CANNOT);
	}

	if (inp->inp_flags2 & INP_MBUF_QUEUE_READY)
		should_wake = false;
	else
		should_wake = true;
	/* Check if packets should be tapped to BPF. */
	bpf_req = bpf_peers_present(lc->ifp->if_bpf);

	/* Strip and compress all the incoming packets. */
	cmp = NULL;
	for (pp = &le->m_head; *pp != NULL; ) {
		mv_to = NULL;
		if (do_bpf_strip_and_compress(inp, lc, le, pp,
		    &cmp, &mv_to, &should_wake, bpf_req) == false) {
			/* Advance to next mbuf. */
			pp = &(*pp)->m_nextpkt;
		} else if (mv_to != NULL) {
			/* We are asked to move pp up */
			pp = &mv_to->m_nextpkt;
		}
	}
	/* Update "m_last_mbuf", if any. */
	if (pp == &le->m_head)
		le->m_last_mbuf = *pp;
	else
		le->m_last_mbuf = __containerof(pp, struct mbuf, m_nextpkt);

	/* Check if any data mbufs left. */
	if (le->m_head != NULL) {
		counter_u64_add(tcp_inp_lro_direct_queue, 1);
		tcp_lro_log(tp, lc, le, NULL, 22, 1,
		    inp->inp_flags2, inp->inp_in_input, 1);
		tcp_queue_pkts(inp, tp, le);
	}
	if (should_wake) {
		/* Wakeup */
		counter_u64_add(tcp_inp_lro_wokeup_queue, 1);
		if ((*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0))
			inp = NULL;
	}
	if (inp != NULL)
		INP_WUNLOCK(inp);
	return (0);	/* Success. */
}

void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{
	int error;

	/* Only optimise if there are multiple packets waiting. */
	CURVNET_SET(lc->ifp->if_vnet);
	error = tcp_lro_flush_tcphpts(lc, le);
	CURVNET_RESTORE();
	if (error != 0) {
		tcp_lro_condense(lc, le);
		tcp_flush_out_entry(lc, le);
	}
	lc->lro_flushed++;
	bzero(le, sizeof(*le));
	LIST_INSERT_HEAD(&lc->lro_free, le, next);
}

#ifdef HAVE_INLINE_FLSLL
#define	tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
#else
static inline uint64_t
tcp_lro_msb_64(uint64_t x)
{
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
	x |= (x >> 32);
	return (x & ~(x >> 1));
}
#endif

/*
 * The tcp_lro_sort() routine is comparable to qsort(), except it has
 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
 * number of elements to sort and 64 is the number of sequence bits
 * available. The algorithm is bit-slicing the 64-bit sequence number,
 * sorting one bit at a time from the most significant bit until the
 * least significant one, skipping the constant bits. This is
 * typically called a radix sort.
 */
static void
tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
{
	struct lro_mbuf_sort temp;
	uint64_t ones;
	uint64_t zeros;
	uint32_t x;
	uint32_t y;

repeat:
	/* for small arrays insertion sort is faster */
	if (size <= 12) {
		for (x = 1; x < size; x++) {
			temp = parray[x];
			for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
				parray[y] = parray[y - 1];
			parray[y] = temp;
		}
		return;
	}

	/* compute sequence bits which are constant */
	ones = 0;
	zeros = 0;
	for (x = 0; x != size; x++) {
		ones |= parray[x].seq;
		zeros |= ~parray[x].seq;
	}

	/* compute bits which are not constant into "ones" */
	ones &= zeros;
	if (ones == 0)
		return;

	/* pick the most significant bit which is not constant */
	ones = tcp_lro_msb_64(ones);

	/*
	 * Move entries having cleared sequence bits to the beginning
	 * of the array:
	 */
	for (x = y = 0; y != size; y++) {
		/* skip set bits */
		if (parray[y].seq & ones)
			continue;
		/* swap entries */
		temp = parray[x];
		parray[x] = parray[y];
		parray[y] = temp;
		x++;
	}

	KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));

	/* sort zeros */
	tcp_lro_sort(parray, x);

	/* sort ones */
	parray += x;
	size -= x;
	goto repeat;
}

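/*
 * Worked example (illustrative): partitioning {10, 01, 11, 00} (binary)
 * on the most significant bit yields {01, 00 | 11, 10}; the routine then
 * recurses on the "zeros" half and tail-iterates on the "ones" half,
 * one non-constant bit at a time.
 */
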
void
tcp_lro_flush_all(struct lro_ctrl *lc)
{
	uint64_t seq;
	uint64_t nseq;
	unsigned x;

	/* check if no mbufs to flush */
	if (lc->lro_mbuf_count == 0)
		goto done;
	if (lc->lro_cpu_is_set == 0) {
		if (lc->lro_last_cpu == curcpu) {
			lc->lro_cnt_of_same_cpu++;
			/* Have we reached the threshold to declare a cpu? */
			if (lc->lro_cnt_of_same_cpu > tcp_lro_cpu_set_thresh)
				lc->lro_cpu_is_set = 1;
		} else {
			lc->lro_last_cpu = curcpu;
			lc->lro_cnt_of_same_cpu = 0;
		}
	}
	CURVNET_SET(lc->ifp->if_vnet);

	/* get current time */
	binuptime(&lc->lro_last_queue_time);

	/* sort all mbufs according to stream */
	tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);

	/* input data into LRO engine, stream by stream */
	seq = 0;
	for (x = 0; x != lc->lro_mbuf_count; x++) {
		struct mbuf *mb;

		/* get mbuf */
		mb = lc->lro_mbuf_data[x].mb;

		/* get sequence number, masking away the packet index */
		nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);

		/* check for new stream */
		if (seq != nseq) {
			seq = nseq;

			/* flush active streams */
			tcp_lro_rx_done(lc);
		}

		/* add packet to LRO engine */
		if (tcp_lro_rx_common(lc, mb, 0, false) != 0) {
			/* input packet to network layer */
			(*lc->ifp->if_input)(lc->ifp, mb);

			/* count the packet as flushed */
			lc->lro_queued++;
			lc->lro_flushed++;
		}
	}
	CURVNET_RESTORE();

	/* flush active streams */
	tcp_lro_rx_done(lc);

done:
	/* number of packets should be zero now */
	lc->lro_mbuf_count = 0;
}

static void
build_ack_entry(struct tcp_ackent *ae, struct tcphdr *th, struct mbuf *m,
    uint32_t *ts_ptr, uint16_t iptos)
{
	/*
	 * Given a TCP ACK, summarize it down into the small TCP ACK
	 * entry.
	 */
	ae->timestamp = m->m_pkthdr.rcv_tstmp;
	if (m->m_flags & M_TSTMP_LRO)
		ae->flags = TSTMP_LRO;
	else if (m->m_flags & M_TSTMP)
		ae->flags = TSTMP_HDWR;
	ae->seq = ntohl(th->th_seq);
	ae->ack = ntohl(th->th_ack);
	ae->flags |= th->th_flags;
	if (ts_ptr != NULL) {
		ae->ts_value = ntohl(ts_ptr[1]);
		ae->ts_echo = ntohl(ts_ptr[2]);
		ae->flags |= HAS_TSTMP;
	}
	ae->win = ntohs(th->th_win);
	ae->codepoint = iptos;
}

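/*
 * Compressed ACKs accumulate as a plain array of struct tcp_ackent
 * inside one mbuf: entry i lives at mtod(nm, struct tcp_ackent *)[i]
 * and the next free slot is nm->m_len / sizeof(struct tcp_ackent),
 * which is how do_bpf_strip_and_compress() below indexes it.
 */
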
/*
 * Do BPF tap for either ACK_CMP packets or MBUF QUEUE type packets
 * and strip everything but the IPv4/IPv6 header.
 */
static bool
do_bpf_strip_and_compress(struct inpcb *inp, struct lro_ctrl *lc,
    struct lro_entry *le, struct mbuf **pp, struct mbuf **cmp, struct mbuf **mv_to,
    bool *should_wake, bool bpf_req)
{
	union {
		void *ptr;
		struct ip *ip4;
		struct ip6_hdr *ip6;
	} l3;
	struct mbuf *m;
	struct mbuf *nm;
	struct tcphdr *th;
	struct tcp_ackent *ack_ent;
	uint32_t *ts_ptr;
	int32_t n_mbuf;
	bool other_opts, can_compress;
	uint8_t lro_type;
	uint16_t iptos;
	int tcp_hdr_offset;
	int idx;

	/* Get current mbuf. */
	m = *pp;

	/* Let the BPF see the packet */
	if (__predict_false(bpf_req))
		ETHER_BPF_MTAP(lc->ifp, m);

	tcp_hdr_offset = m->m_pkthdr.lro_tcp_h_off;
	lro_type = le->inner.data.lro_type;
	switch (lro_type) {
	case LRO_TYPE_NONE:
		lro_type = le->outer.data.lro_type;
		switch (lro_type) {
		case LRO_TYPE_IPV4_TCP:
			tcp_hdr_offset -= sizeof(*le->outer.ip4);
			m->m_pkthdr.lro_etype = ETHERTYPE_IP;
			break;
		case LRO_TYPE_IPV6_TCP:
			tcp_hdr_offset -= sizeof(*le->outer.ip6);
			m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
			break;
		default:
			goto compressed;
		}
		break;
	case LRO_TYPE_IPV4_TCP:
		tcp_hdr_offset -= sizeof(*le->outer.ip4);
		m->m_pkthdr.lro_etype = ETHERTYPE_IP;
		break;
	case LRO_TYPE_IPV6_TCP:
		tcp_hdr_offset -= sizeof(*le->outer.ip6);
		m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
		break;
	default:
		goto compressed;
	}

	MPASS(tcp_hdr_offset >= 0);

	m_adj(m, tcp_hdr_offset);
	m->m_flags |= M_LRO_EHDRSTRP;
	m->m_flags &= ~M_ACKCMP;
	m->m_pkthdr.lro_tcp_h_off -= tcp_hdr_offset;

	th = tcp_lro_get_th(m);

	th->th_sum = 0;		/* TCP checksum is valid. */

	/* Check if ACK can be compressed */
	can_compress = tcp_lro_ack_valid(m, th, &ts_ptr, &other_opts);

	/* Now let's look at the should-wake states */
	if ((other_opts == true) &&
	    ((inp->inp_flags2 & INP_DONT_SACK_QUEUE) == 0)) {
		/*
		 * If there are other options (SACK?) and the
		 * tcp endpoint has not expressly told us it does
		 * not care about SACKS, then we should wake up.
		 */
		*should_wake = true;
	}
	/* Is the ack compressible? */
	if (can_compress == false)
		goto done;
	/* Does the TCP endpoint support ACK compression? */
	if ((inp->inp_flags2 & INP_MBUF_ACKCMP) == 0)
		goto done;

	/* Let's get the TOS/traffic class field */
	l3.ptr = mtod(m, void *);
	switch (lro_type) {
	case LRO_TYPE_IPV4_TCP:
		iptos = l3.ip4->ip_tos;
		break;
	case LRO_TYPE_IPV6_TCP:
		iptos = IPV6_TRAFFIC_CLASS(l3.ip6);
		break;
	default:
		iptos = 0;	/* Keep compiler happy. */
		break;
	}
	/* Now let's get space if we don't have some already */
	if (*cmp == NULL) {
new_one:
		nm = tcp_lro_get_last_if_ackcmp(lc, le, inp, &n_mbuf);
		if (__predict_false(nm == NULL))
			goto done;
		*cmp = nm;
		if (n_mbuf) {
			/*
			 * Link in the new cmp ack to our in-order place,
			 * first set our cmp ack's next to where we are.
			 */
			nm->m_nextpkt = m;
			(*pp) = nm;
			/*
			 * Set it up so mv_to is advanced to our
			 * compressed ack. This way the caller can
			 * advance pp to the right place.
			 */
			*mv_to = nm;
			/*
			 * Advance it here locally as well.
			 */
			pp = &nm->m_nextpkt;
		}
	} else {
		/* We have one already we are working on */
		nm = *cmp;
		if (M_TRAILINGSPACE(nm) < sizeof(struct tcp_ackent)) {
			/* We ran out of space */
			inp->inp_flags2 |= INP_MBUF_L_ACKS;
			goto new_one;
		}
	}
	MPASS(M_TRAILINGSPACE(nm) >= sizeof(struct tcp_ackent));
	counter_u64_add(tcp_inp_lro_compressed, 1);
	le->compressed++;
	/* We can add in to the one on the tail */
	ack_ent = mtod(nm, struct tcp_ackent *);
	idx = (nm->m_len / sizeof(struct tcp_ackent));
	build_ack_entry(&ack_ent[idx], th, m, ts_ptr, iptos);

	/* Bump the size of both pkt-hdr and len */
	nm->m_len += sizeof(struct tcp_ackent);
	nm->m_pkthdr.len += sizeof(struct tcp_ackent);
compressed:
	/* Advance to next mbuf before freeing. */
	*pp = m->m_nextpkt;
	m->m_nextpkt = NULL;
	m_freem(m);
	return (true);
done:
	counter_u64_add(tcp_uncomp_total, 1);
	le->uncompressed++;
	return (false);
}

static struct lro_head *
tcp_lro_rx_get_bucket(struct lro_ctrl *lc, struct mbuf *m, struct lro_parser *parser)
{
	u_long hash;

	if (M_HASHTYPE_ISHASH(m)) {
		hash = m->m_pkthdr.flowid;
	} else {
		for (unsigned i = hash = 0; i != LRO_RAW_ADDRESS_MAX; i++)
			hash += parser->data.raw[i];
	}
	return (&lc->lro_hash[hash % lc->lro_hashsz]);
}

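/*
 * Example (illustrative): with RSS the bucket is simply
 * flowid % lro_hashsz; without a hardware hash the raw address and
 * port words are summed instead, which is weaker but still maps all
 * packets of one connection to the same bucket.
 */
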
static int
tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, bool use_hash)
{
	struct lro_parser pi;	/* inner address data */
	struct lro_parser po;	/* outer address data */
	struct lro_parser *pa;	/* current parser for TCP stream */
	struct lro_entry *le;
	struct lro_head *bucket;
	struct tcphdr *th;
	int tcp_data_len;
	int tcp_opt_len;
	int error;
	uint16_t tcp_data_sum;

#ifdef INET
	/* Quickly decide if packet cannot be LRO'ed */
	if (__predict_false(V_ipforwarding != 0))
		return (TCP_LRO_CANNOT);
#endif
#ifdef INET6
	/* Quickly decide if packet cannot be LRO'ed */
	if (__predict_false(V_ip6_forwarding != 0))
		return (TCP_LRO_CANNOT);
#endif
	if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
	    ((CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) ||
	    (m->m_pkthdr.csum_data != 0xffff)) {
		/*
		 * The checksum either did not have hardware offload
		 * or it was a bad checksum. We can't LRO such
		 * a packet.
		 */
		counter_u64_add(tcp_bad_csums, 1);
		return (TCP_LRO_CANNOT);
	}
	/* We expect a contiguous header [eh, ip, tcp]. */
	pa = tcp_lro_parser(m, &po, &pi, true);
	if (__predict_false(pa == NULL))
		return (TCP_LRO_NOT_SUPPORTED);

	/* We don't expect any padding. */
	error = tcp_lro_trim_mbuf_chain(m, pa);
	if (__predict_false(error != 0))
		return (error);

#ifdef INET
	switch (pa->data.lro_type) {
	case LRO_TYPE_IPV4_TCP:
		error = tcp_lro_rx_ipv4(lc, m, pa->ip4);
		if (__predict_false(error != 0))
			return (error);
		break;
	default:
		break;
	}
#endif
	/* If no hardware or arrival stamp on the packet add timestamp */
	if ((m->m_flags & (M_TSTMP_LRO | M_TSTMP)) == 0) {
		m->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
		m->m_flags |= M_TSTMP_LRO;
	}

	/* Get pointer to TCP header. */
	th = pa->tcp;

	/* Don't process SYN packets. */
	if (__predict_false(th->th_flags & TH_SYN))
		return (TCP_LRO_CANNOT);

	/* Get total TCP header length and compute payload length. */
	tcp_opt_len = (th->th_off << 2);
	tcp_data_len = m->m_pkthdr.len - ((uint8_t *)th -
	    (uint8_t *)m->m_data) - tcp_opt_len;
	tcp_opt_len -= sizeof(*th);

	/* Don't process invalid TCP headers. */
	if (__predict_false(tcp_opt_len < 0 || tcp_data_len < 0))
		return (TCP_LRO_CANNOT);

	/* Compute TCP data only checksum. */
	if (tcp_data_len == 0)
		tcp_data_sum = 0;	/* no data, no checksum */
	else if (__predict_false(csum != 0))
		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~csum);
	else
		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~th->th_sum);

	/* Save TCP info in mbuf. */
	m->m_nextpkt = NULL;
	m->m_pkthdr.rcvif = lc->ifp;
	m->m_pkthdr.lro_tcp_d_csum = tcp_data_sum;
	m->m_pkthdr.lro_tcp_d_len = tcp_data_len;
	m->m_pkthdr.lro_tcp_h_off = ((uint8_t *)th - (uint8_t *)m->m_data);
	m->m_pkthdr.lro_nsegs = 1;

	/* Get hash bucket. */
	if (!use_hash) {
		bucket = &lc->lro_hash[0];
	} else {
		bucket = tcp_lro_rx_get_bucket(lc, m, pa);
	}

	/* Try to find a matching previous segment. */
	LIST_FOREACH(le, bucket, hash_next) {
		/* Compare addresses and ports. */
		if (lro_address_compare(&po.data, &le->outer.data) == false ||
		    lro_address_compare(&pi.data, &le->inner.data) == false)
			continue;

		/* Check if no data and old ACK. */
		if (tcp_data_len == 0 &&
		    SEQ_LT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
			m_freem(m);
			return (0);
		}

		/* Mark "m" in the last spot. */
		le->m_last_mbuf->m_nextpkt = m;
		/* Now set the tail to "m". */
		le->m_last_mbuf = m;
		return (0);
	}

	/* Try to find an empty slot. */
	if (LIST_EMPTY(&lc->lro_free))
		return (TCP_LRO_NO_ENTRIES);

	/* Start a new segment chain. */
	le = LIST_FIRST(&lc->lro_free);
	LIST_REMOVE(le, next);
	tcp_lro_active_insert(lc, bucket, le);

	/* Make sure the headers are set. */
	le->inner = pi;
	le->outer = po;

	/* Store time this entry was allocated. */
	le->alloc_time = lc->lro_last_queue_time;

	tcp_set_entry_to_mbuf(lc, le, m, th);

	/* Now set the tail to "m". */
	le->m_last_mbuf = m;

	return (0);
}

int
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{
	int error;

	if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
	    ((CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) ||
	    (m->m_pkthdr.csum_data != 0xffff)) {
		/*
		 * The checksum either did not have hardware offload
		 * or it was a bad checksum. We can't LRO such
		 * a packet.
		 */
		counter_u64_add(tcp_bad_csums, 1);
		return (TCP_LRO_CANNOT);
	}
	/* get current time */
	binuptime(&lc->lro_last_queue_time);
	CURVNET_SET(lc->ifp->if_vnet);
	error = tcp_lro_rx_common(lc, m, csum, true);
	CURVNET_RESTORE();

	return (error);
}

void
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
{
	/* sanity checks */
	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
	    lc->lro_mbuf_max == 0)) {
		/* packet drop */
		m_freem(mb);
		return;
	}

	/* check if packet is not LRO capable */
	if (__predict_false((lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
		/* input packet to network layer */
		(*lc->ifp->if_input) (lc->ifp, mb);
		return;
	}

	/* create sequence number */
	lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
	    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
	    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
	    ((uint64_t)lc->lro_mbuf_count);

	/* enter mbuf */
	lc->lro_mbuf_data[lc->lro_mbuf_count].mb = mb;

	/* flush if array is full */
	if (__predict_false(++lc->lro_mbuf_count == lc->lro_mbuf_max))
		tcp_lro_flush_all(lc);
}
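
/*
 * Layout of the 64-bit sort key built above (illustrative):
 *
 *	bits 63..56	M_HASHTYPE_GET(mb)	(hash type)
 *	bits 55..24	mb->m_pkthdr.flowid	(flow identifier)
 *	bits 23..0	lro_mbuf_count		(insertion order)
 *
 * tcp_lro_flush_all() masks away the low 24 bits (-1ULL << 24), so
 * packets of the same stream compare equal while the index keeps the
 * radix sort stable with respect to arrival order.
 */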