/*-
 * Copyright (c) 2007, Myricom Inc.
 * Copyright (c) 2008, Intel Corporation.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2016 Mellanox Technologies.
 * All rights reserved.
 *
 * Portions of this software were developed by Bjoern Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/vnet.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>

#include <netinet6/ip6_var.h>

#include <machine/in_cksum.h>
static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");

#define	TCP_LRO_UPDATE_CSUM	1
#ifndef	TCP_LRO_UPDATE_CSUM
#define	TCP_LRO_INVALID_CSUM	0x0000
#endif
static void	tcp_lro_rx_done(struct lro_ctrl *lc);
static int	tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m,
		    uint32_t csum, int use_hash);
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP LRO");

static unsigned	tcp_lro_entries = TCP_LRO_ENTRIES;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
    "default number of LRO entries");
static __inline void
tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
    struct lro_entry *le)
{

	LIST_INSERT_HEAD(&lc->lro_active, le, next);
	LIST_INSERT_HEAD(bucket, le, hash_next);
}

static __inline void
tcp_lro_active_remove(struct lro_entry *le)
{

	LIST_REMOVE(le, next);		/* active list */
	LIST_REMOVE(le, hash_next);	/* hash bucket */
}
int
tcp_lro_init(struct lro_ctrl *lc)
{
	return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
}
int
tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
    unsigned lro_entries, unsigned lro_mbufs)
{
	struct lro_entry *le;
	size_t size;
	unsigned i, elements;

	lc->lro_bad_csum = 0;
	lc->lro_queued = 0;
	lc->lro_flushed = 0;
	lc->lro_mbuf_count = 0;
	lc->lro_mbuf_max = lro_mbufs;
	lc->lro_cnt = lro_entries;
	lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
	lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
	lc->ifp = ifp;
	LIST_INIT(&lc->lro_free);
	LIST_INIT(&lc->lro_active);

	/* create hash table to accelerate entry lookup */
	if (lro_entries > lro_mbufs)
		elements = lro_entries;
	else
		elements = lro_mbufs;
	lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
	    HASH_NOWAIT);
	if (lc->lro_hash == NULL) {
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}

	/* compute size to allocate */
	size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
	    (lro_entries * sizeof(*le));
	lc->lro_mbuf_data = (struct lro_mbuf_sort *)
	    malloc(size, M_LRO, M_NOWAIT | M_ZERO);

	/* check for out of memory */
	if (lc->lro_mbuf_data == NULL) {
		free(lc->lro_hash, M_LRO);
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}
	/* compute offset for LRO entries */
	le = (struct lro_entry *)
	    (lc->lro_mbuf_data + lro_mbufs);

	/* setup linked list */
	for (i = 0; i != lro_entries; i++)
		LIST_INSERT_HEAD(&lc->lro_free, le + i, next);

	return (0);
}
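/*
 * Memory layout note: the sort array and the LRO entries share one
 * allocation, laid out back to back:
 *
 *	lro_mbuf_data -> [lro_mbuf_sort * lro_mbufs | lro_entry * lro_entries]
 *
 * which is why tcp_lro_free() below only frees lc->lro_mbuf_data.
 */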
void
tcp_lro_free(struct lro_ctrl *lc)
{
	struct lro_entry *le;
	unsigned x;

	/* reset LRO free list */
	LIST_INIT(&lc->lro_free);

	/* free active mbufs, if any */
	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		m_freem(le->m_head);
	}

	/* free hash table */
	free(lc->lro_hash, M_LRO);
	lc->lro_hash = NULL;
	lc->lro_hashsz = 0;

	/* free mbuf array, if any */
	for (x = 0; x != lc->lro_mbuf_count; x++)
		m_freem(lc->lro_mbuf_data[x].mb);
	lc->lro_mbuf_count = 0;

	/* free allocated memory, if any */
	free(lc->lro_mbuf_data, M_LRO);
	lc->lro_mbuf_data = NULL;
}
#ifdef TCP_LRO_UPDATE_CSUM
static uint16_t
tcp_lro_csum_th(struct tcphdr *th)
{
	uint32_t ch;
	uint16_t *p, l;

	/* Sum the TCP header as 16-bit words, with th_sum zeroed. */
	ch = th->th_sum = 0x0000;
	l = th->th_off;
	p = (uint16_t *)th;
	while (l > 0) {
		ch += *p;
		p++;
		ch += *p;
		p++;
		l--;
	}
	/* Fold the 32-bit sum back into 16 bits (ones' complement). */
	while (ch > 0xffff)
		ch = (ch >> 16) + (ch & 0xffff);

	return (ch & 0xffff);
}
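/*
 * Folding example: a 32-bit intermediate sum of 0x10001 folds as
 * (0x10001 >> 16) + (0x10001 & 0xffff) = 0x0001 + 0x0001 = 0x0002,
 * which is the ones' complement end-around carry.
 */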
static uint16_t
tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
    uint16_t tcp_data_len, uint16_t csum)
{
	uint32_t c;
	uint16_t cs;

	c = csum;

	/* Remove length from checksum. */
	switch (le->eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)l3hdr;
		if (le->append_cnt == 0)
			cs = ip6->ip6_plen;
		else {
			uint32_t cx;

			cx = ntohs(ip6->ip6_plen);
			cs = in6_cksum_pseudo(ip6, cx, ip6->ip6_nxt, 0);
		}
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		struct ip *ip4;

		ip4 = (struct ip *)l3hdr;
		if (le->append_cnt == 0)
			cs = ip4->ip_len;
		else {
			cs = in_addword(ntohs(ip4->ip_len) - sizeof(*ip4),
			    IPPROTO_TCP);
			cs = in_pseudo(ip4->ip_src.s_addr, ip4->ip_dst.s_addr,
			    htons(cs));
		}
		break;
	}
#endif
	default:
		cs = 0;		/* Keep compiler happy. */
	}

	cs = ~cs;
	c += cs;

	/* Remove TCP header csum. */
	cs = ~tcp_lro_csum_th(th);
	c += cs;
	while (c > 0xffff)
		c = (c >> 16) + (c & 0xffff);

	return (c & 0xffff);
}
#endif /* TCP_LRO_UPDATE_CSUM */
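/*
 * The fixup above is the usual incremental checksum update (cf.
 * RFC 1624): adding the complement ~F of a field F to a ones'
 * complement sum cancels F's contribution, so the length/pseudo-header
 * and TCP header terms are removed here and re-added with their final
 * values in tcp_lro_flush().
 */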
static void
tcp_lro_rx_done(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		tcp_lro_flush(lc, le);
	}
}
void
tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
{
	struct lro_entry *le, *le_tmp;
	struct timeval tv;

	if (LIST_EMPTY(&lc->lro_active))
		return;

	getmicrotime(&tv);
	timevalsub(&tv, timeout);
	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
		if (timevalcmp(&tv, &le->mtime, >=)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}
void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{

	if (le->append_cnt > 0) {
		struct tcphdr *th;
		uint16_t p_len;

		p_len = htons(le->p_len);
		switch (le->eh_type) {
#ifdef INET6
		case ETHERTYPE_IPV6:
		{
			struct ip6_hdr *ip6;

			ip6 = le->le_ip6;
			ip6->ip6_plen = p_len;
			th = (struct tcphdr *)(ip6 + 1);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			le->p_len += ETHER_HDR_LEN + sizeof(*ip6);
			break;
		}
#endif
#ifdef INET
		case ETHERTYPE_IP:
		{
			struct ip *ip4;
#ifdef TCP_LRO_UPDATE_CSUM
			uint32_t cl;
			uint16_t c;
#endif

			ip4 = le->le_ip4;
#ifdef TCP_LRO_UPDATE_CSUM
			/* Fix IP header checksum for new length. */
			c = ~ip4->ip_sum;
			cl = c;
			c = ~ip4->ip_len;
			cl += c + p_len;
			while (cl > 0xffff)
				cl = (cl >> 16) + (cl & 0xffff);
			c = cl;
			ip4->ip_sum = ~c;
#else
			ip4->ip_sum = TCP_LRO_INVALID_CSUM;
#endif
			ip4->ip_len = p_len;
			th = (struct tcphdr *)(ip4 + 1);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			le->p_len += ETHER_HDR_LEN;
			break;
		}
#endif
		default:
			th = NULL;	/* Keep compiler happy. */
		}
		le->m_head->m_pkthdr.csum_data = 0xffff;
		le->m_head->m_pkthdr.len = le->p_len;

		/* Incorporate the latest ACK into the TCP header. */
		th->th_ack = le->ack_seq;
		th->th_win = le->window;
		/* Incorporate the latest timestamp into the TCP header. */
		if (le->timestamp != 0) {
			uint32_t *ts_ptr;

			ts_ptr = (uint32_t *)(th + 1);
			ts_ptr[1] = htonl(le->tsval);
			ts_ptr[2] = le->tsecr;
		}
#ifdef TCP_LRO_UPDATE_CSUM
		/* Update the TCP header checksum. */
		le->ulp_csum += p_len;
		le->ulp_csum += tcp_lro_csum_th(th);
		while (le->ulp_csum > 0xffff)
			le->ulp_csum = (le->ulp_csum >> 16) +
			    (le->ulp_csum & 0xffff);
		th->th_sum = (le->ulp_csum & 0xffff);
		th->th_sum = ~th->th_sum;
#else
		th->th_sum = TCP_LRO_INVALID_CSUM;
#endif
	}

	le->m_head->m_pkthdr.lro_nsegs = le->append_cnt + 1;
	(*lc->ifp->if_input)(lc->ifp, le->m_head);
	lc->lro_queued += le->append_cnt + 1;
	lc->lro_flushed++;
	bzero(le, sizeof(*le));
	LIST_INSERT_HEAD(&lc->lro_free, le, next);
}
#ifdef HAVE_INLINE_FLSLL
#define	tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
#else
static inline uint64_t
tcp_lro_msb_64(uint64_t x)
{
	/* Smear the most significant set bit into all lower positions. */
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
	x |= (x >> 32);
	return (x & ~(x >> 1));
}
#endif
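/*
 * Example: for x = 0x0500 the shifts above smear the top bit down,
 * giving 0x07ff, and 0x07ff & ~(0x07ff >> 1) = 0x0400, i.e. only the
 * most significant set bit survives.
 */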
/*
 * The tcp_lro_sort() routine is comparable to qsort(), except it has
 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
 * number of elements to sort and 64 is the number of sequence bits
 * available. The algorithm is bit-slicing the 64-bit sequence number,
 * sorting one bit at a time from the most significant bit until the
 * least significant one, skipping the constant bits. This is
 * typically called a radix sort.
 */
static void
tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
{
	struct lro_mbuf_sort temp;
	uint64_t ones;
	uint64_t zeros;
	uint32_t x;
	uint32_t y;

repeat:
	/* for small arrays insertion sort is faster */
	if (size <= 12) {
		for (x = 1; x < size; x++) {
			temp = parray[x];
			for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
				parray[y] = parray[y - 1];
			parray[y] = temp;
		}
		return;
	}

	/* compute sequence bits which are constant */
	ones = 0;
	zeros = 0;
	for (x = 0; x != size; x++) {
		ones |= parray[x].seq;
		zeros |= ~parray[x].seq;
	}

	/* compute bits which are not constant into "ones" */
	ones &= zeros;
	if (ones == 0)
		return;

	/* pick the most significant bit which is not constant */
	ones = tcp_lro_msb_64(ones);

	/*
	 * Move entries having cleared sequence bits to the beginning
	 * of the array:
	 */
	for (x = y = 0; y != size; y++) {
		/* skip set bits */
		if (parray[y].seq & ones)
			continue;
		/* swap entries */
		temp = parray[x];
		parray[x] = parray[y];
		parray[y] = temp;
		x++;
	}

	KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));

	/* sort zeros */
	tcp_lro_sort(parray, x);

	/* sort ones */
	parray += x;
	size -= x;
	goto repeat;
}
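/*
 * Partition example: for the keys {110, 010, 111, 011} (binary), bit 2
 * is the most significant bit that is not constant. One pass moves the
 * clear-bit keys {010, 011} in front of the set-bit keys {110, 111};
 * the zeros are then sorted by the recursive call and the ones by the
 * "goto repeat" loop, each on the remaining lower bits.
 */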
void
tcp_lro_flush_all(struct lro_ctrl *lc)
{
	uint64_t seq;
	uint64_t nseq;
	unsigned x;

	/* check if no mbufs to flush */
	if (lc->lro_mbuf_count == 0)
		goto done;

	/* sort all mbufs according to stream */
	tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);

	/* input data into LRO engine, stream by stream */
	seq = 0;
	for (x = 0; x != lc->lro_mbuf_count; x++) {
		struct mbuf *mb;

		/* get mbuf */
		mb = lc->lro_mbuf_data[x].mb;

		/* get sequence number, masking away the packet index */
		nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);

		/* check for new stream */
		if (seq != nseq) {
			seq = nseq;

			/* flush active streams */
			tcp_lro_rx_done(lc);
		}

		/* add packet to LRO engine */
		if (tcp_lro_rx2(lc, mb, 0, 0) != 0) {
			/* input packet to network layer */
			(*lc->ifp->if_input)(lc->ifp, mb);
			lc->lro_queued++;
			lc->lro_flushed++;
		}
	}
done:
	/* flush active streams */
	tcp_lro_rx_done(lc);

	lc->lro_mbuf_count = 0;
}
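/*
 * Typical driver usage of the queued path (a sketch; "sc" and
 * "hypothetical_rx_dequeue" are illustrative, not part of this API):
 * queue packets during an RX interrupt, then flush once so that
 * same-stream segments end up adjacent and can be merged:
 *
 *	struct mbuf *mb;
 *
 *	while ((mb = hypothetical_rx_dequeue(sc)) != NULL)
 *		tcp_lro_queue_mbuf(&sc->lro, mb);
 *	tcp_lro_flush_all(&sc->lro);
 */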
#ifdef INET6
static int
tcp_lro_rx_ipv6(struct lro_ctrl *lc, struct mbuf *m, struct ip6_hdr *ip6,
    struct tcphdr **th)
{

	/* XXX-BZ we should check the flow-label. */

	/* XXX-BZ We do not yet support ext. hdrs. */
	if (ip6->ip6_nxt != IPPROTO_TCP)
		return (TCP_LRO_NOT_SUPPORTED);

	/* Find the TCP header. */
	*th = (struct tcphdr *)(ip6 + 1);

	return (0);
}
#endif
#ifdef INET
static int
tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4,
    struct tcphdr **th)
{
	int csum_flags;
	uint16_t csum;

	if (ip4->ip_p != IPPROTO_TCP)
		return (TCP_LRO_NOT_SUPPORTED);

	/* Ensure there are no options. */
	if ((ip4->ip_hl << 2) != sizeof (*ip4))
		return (TCP_LRO_CANNOT);

	/* .. and the packet is not fragmented. */
	if (ip4->ip_off & htons(IP_MF|IP_OFFMASK))
		return (TCP_LRO_CANNOT);

	/* Legacy IP has a header checksum that needs to be correct. */
	csum_flags = m->m_pkthdr.csum_flags;
	if (csum_flags & CSUM_IP_CHECKED) {
		if (__predict_false((csum_flags & CSUM_IP_VALID) == 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	} else {
		csum = in_cksum_hdr(ip4);
		if (__predict_false((csum) != 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	}

	/* Find the TCP header (we assured there are no IP options). */
	*th = (struct tcphdr *)(ip4 + 1);
	return (0);
}
#endif
static int
tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)
{
	struct lro_entry *le;
	struct ether_header *eh;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;	/* Keep compiler happy. */
#endif
#ifdef INET
	struct ip *ip4 = NULL;		/* Keep compiler happy. */
#endif
	struct tcphdr *th;
	void *l3hdr = NULL;		/* Keep compiler happy. */
	uint32_t *ts_ptr;
	tcp_seq seq;
	int error, ip_len, l;
	uint16_t eh_type, tcp_data_len;
	struct lro_head *bucket;
	int force_flush = 0;

	/* We expect a contiguous header [eh, ip, tcp]. */

	eh = mtod(m, struct ether_header *);
	eh_type = ntohs(eh->ether_type);
	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		CURVNET_SET(lc->ifp->if_vnet);
		if (V_ip6_forwarding != 0) {
			/* XXX-BZ stats but changing lro_ctrl is a problem. */
			CURVNET_RESTORE();
			return (TCP_LRO_CANNOT);
		}
		CURVNET_RESTORE();
		l3hdr = ip6 = (struct ip6_hdr *)(eh + 1);
		error = tcp_lro_rx_ipv6(lc, m, ip6, &th);
		if (error != 0)
			return (error);
		tcp_data_len = ntohs(ip6->ip6_plen);
		ip_len = sizeof(*ip6) + tcp_data_len;
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		CURVNET_SET(lc->ifp->if_vnet);
		if (V_ipforwarding != 0) {
			/* XXX-BZ stats but changing lro_ctrl is a problem. */
			CURVNET_RESTORE();
			return (TCP_LRO_CANNOT);
		}
		CURVNET_RESTORE();
		l3hdr = ip4 = (struct ip *)(eh + 1);
		error = tcp_lro_rx_ipv4(lc, m, ip4, &th);
		if (error != 0)
			return (error);
		ip_len = ntohs(ip4->ip_len);
		tcp_data_len = ip_len - sizeof(*ip4);
		break;
	}
#endif
	/* XXX-BZ what happens in case of VLAN(s)? */
	default:
		return (TCP_LRO_NOT_SUPPORTED);
	}
	/*
	 * If the frame is padded beyond the end of the IP packet, then we must
	 * trim the extra bytes off.
	 */
	l = m->m_pkthdr.len - (ETHER_HDR_LEN + ip_len);
	if (l != 0) {
		if (l < 0)
			/* Truncated packet. */
			return (TCP_LRO_CANNOT);
		/* Trim the packet. */
		m_adj(m, -l);
	}
	/*
	 * Check TCP header constraints.
	 */
	/* Ensure no bits set besides ACK or PSH. */
	if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
		if (th->th_flags & TH_SYN)
			return (TCP_LRO_CANNOT);
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment, e.g. FIN.
		 */
		force_flush = 1;
	}

	/* XXX-BZ We lose an ACK|PUSH flag concatenating multiple segments. */
	/* XXX-BZ Ideally we'd flush on PUSH? */
	/*
	 * Check for timestamps.
	 * Since the only option we handle is the timestamp option, we only
	 * have to handle the simple case of aligned timestamps.
	 */
	l = (th->th_off << 2);
	tcp_data_len -= l;
	l -= sizeof(*th);
	ts_ptr = (uint32_t *)(th + 1);
	if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
	    (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
	    TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment.
		 */
		force_flush = 1;
	}
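	/*
	 * The only accepted option layout is the 12-byte "appendix A" form
	 * (TCPOLEN_TSTAMP_APPA): NOP, NOP, TIMESTAMP kind (8), length (10),
	 * followed by the 4-byte TSval and TSecr; the ntohl() constant above
	 * encodes exactly those first four option bytes (0x0101080a).
	 */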
	/* If the driver did not pass in the checksum, set it now. */
	if (csum == 0x0000)
		csum = th->th_sum;

	seq = ntohl(th->th_seq);

	if (!use_hash) {
		bucket = &lc->lro_hash[0];
	} else if (M_HASHTYPE_ISHASH(m)) {
		bucket = &lc->lro_hash[m->m_pkthdr.flowid % lc->lro_hashsz];
	} else {
		uint32_t hash;

		switch (eh_type) {
#ifdef INET
		case ETHERTYPE_IP:
			hash = ip4->ip_src.s_addr + ip4->ip_dst.s_addr;
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			hash = ip6->ip6_src.s6_addr32[0] +
			    ip6->ip6_dst.s6_addr32[0];
			hash += ip6->ip6_src.s6_addr32[1] +
			    ip6->ip6_dst.s6_addr32[1];
			hash += ip6->ip6_src.s6_addr32[2] +
			    ip6->ip6_dst.s6_addr32[2];
			hash += ip6->ip6_src.s6_addr32[3] +
			    ip6->ip6_dst.s6_addr32[3];
			break;
#endif
		default:
			hash = 0;
			break;
		}
		hash += th->th_sport + th->th_dport;
		bucket = &lc->lro_hash[hash % lc->lro_hashsz];
	}
	/* Try to find a matching previous segment. */
	LIST_FOREACH(le, bucket, hash_next) {
		if (le->eh_type != eh_type)
			continue;
		if (le->source_port != th->th_sport ||
		    le->dest_port != th->th_dport)
			continue;
		switch (eh_type) {
#ifdef INET6
		case ETHERTYPE_IPV6:
			if (bcmp(&le->source_ip6, &ip6->ip6_src,
			    sizeof(struct in6_addr)) != 0 ||
			    bcmp(&le->dest_ip6, &ip6->ip6_dst,
			    sizeof(struct in6_addr)) != 0)
				continue;
			break;
#endif
#ifdef INET
		case ETHERTYPE_IP:
			if (le->source_ip4 != ip4->ip_src.s_addr ||
			    le->dest_ip4 != ip4->ip_dst.s_addr)
				continue;
			break;
#endif
		}

		if (force_flush) {
			/* Timestamps mismatch; this is a FIN, etc */
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			return (TCP_LRO_CANNOT);
		}

		/* Flush now if appending will result in overflow. */
		if (le->p_len > (lc->lro_length_lim - tcp_data_len)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			break;
		}

		/* Try to append the new segment. */
		if (__predict_false(seq != le->next_seq ||
		    (tcp_data_len == 0 && le->ack_seq == th->th_ack))) {
			/* Out of order packet or duplicate ACK. */
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			return (TCP_LRO_CANNOT);
		}
		if (l != 0) {
			uint32_t tsval = ntohl(*(ts_ptr + 1));
			/* Make sure timestamp values are increasing. */
			/* XXX-BZ flip and use TSTMP_GEQ macro for this? */
			if (__predict_false(le->tsval > tsval ||
			    (le->tsval == tsval &&
			    le->tsecr == *(ts_ptr + 2))))
				return (TCP_LRO_CANNOT);
			le->tsval = tsval;
			le->tsecr = *(ts_ptr + 2);
		}

		le->next_seq += tcp_data_len;
		le->ack_seq = th->th_ack;
		le->window = th->th_win;
		le->append_cnt++;

#ifdef TCP_LRO_UPDATE_CSUM
		le->ulp_csum += tcp_lro_rx_csum_fixup(le, l3hdr, th,
		    tcp_data_len, ~csum);
#endif

		if (tcp_data_len == 0) {
			m_freem(m);
			/*
			 * Flush this LRO entry, if this ACK should not
			 * be further delayed.
			 */
			if (le->append_cnt >= lc->lro_ackcnt_lim) {
				tcp_lro_active_remove(le);
				tcp_lro_flush(lc, le);
			}
			return (0);
		}

		le->p_len += tcp_data_len;

		/*
		 * Adjust the mbuf so that m_data points to the first byte of
		 * the ULP payload.  Adjust the mbuf to avoid complications and
		 * append new segment to existing mbuf chain.
		 */
		m_adj(m, m->m_pkthdr.len - tcp_data_len);
		m_demote_pkthdr(m);

		le->m_tail->m_next = m;
		le->m_tail = m_last(m);

		/*
		 * If a possible next full length packet would cause an
		 * overflow, pro-actively flush now.
		 */
		if (le->p_len > (lc->lro_length_lim - lc->ifp->if_mtu)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		} else
			getmicrotime(&le->mtime);

		return (0);
	}

	if (force_flush) {
		/*
		 * Nothing to flush, but this segment can not be further
		 * aggregated/delayed.
		 */
		return (TCP_LRO_CANNOT);
	}
	/* Try to find an empty slot. */
	if (LIST_EMPTY(&lc->lro_free))
		return (TCP_LRO_NO_ENTRIES);

	/* Start a new segment chain. */
	le = LIST_FIRST(&lc->lro_free);
	LIST_REMOVE(le, next);
	tcp_lro_active_insert(lc, bucket, le);
	getmicrotime(&le->mtime);

	/* Start filling in details. */
	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		le->le_ip6 = ip6;
		le->source_ip6 = ip6->ip6_src;
		le->dest_ip6 = ip6->ip6_dst;
		le->eh_type = eh_type;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN - sizeof(*ip6);
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		le->le_ip4 = ip4;
		le->source_ip4 = ip4->ip_src.s_addr;
		le->dest_ip4 = ip4->ip_dst.s_addr;
		le->eh_type = eh_type;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN;
		break;
#endif
	}
	le->source_port = th->th_sport;
	le->dest_port = th->th_dport;

	le->next_seq = seq + tcp_data_len;
	le->ack_seq = th->th_ack;
	le->window = th->th_win;
	if (l != 0) {
		le->timestamp = 1;
		le->tsval = ntohl(*(ts_ptr + 1));
		le->tsecr = *(ts_ptr + 2);
	}

#ifdef TCP_LRO_UPDATE_CSUM
	/*
	 * Do not touch the csum of the first packet.  However save the
	 * "adjusted" checksum of just the source and destination addresses,
	 * the next header and the TCP payload.  The length and TCP header
	 * parts may change, so we remove those from the saved checksum and
	 * re-add with final values on tcp_lro_flush() if needed.
	 */
	KASSERT(le->ulp_csum == 0, ("%s: le=%p le->ulp_csum=0x%04x\n",
	    __func__, le, le->ulp_csum));

	le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len,
	    ~csum);
	th->th_sum = csum;	/* Restore checksum on first packet. */
#endif

	le->m_head = m;
	le->m_tail = m_last(m);

	return (0);
}
int
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{

	return tcp_lro_rx2(lc, m, csum, 1);
}
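/*
 * Per-packet usage sketch (unsorted path; "sc", "ifp" and "m" are
 * driver-side names, not part of this API): feed each received frame
 * to tcp_lro_rx() and fall back to the regular input path when the
 * frame cannot be aggregated:
 *
 *	if (tcp_lro_rx(&sc->lro, m, 0) != 0)
 *		(*ifp->if_input)(ifp, m);
 */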
void
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
{
	/* sanity checks */
	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
	    lc->lro_mbuf_max == 0)) {
		/* packet drop */
		m_freem(mb);
		return;
	}

	/* check if packet is not LRO capable */
	if (__predict_false(mb->m_pkthdr.csum_flags == 0 ||
	    (lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
		lc->lro_flushed++;
		lc->lro_queued++;

		/* input packet to network layer */
		(*lc->ifp->if_input) (lc->ifp, mb);
		return;
	}

	/* check if array is full */
	if (__predict_false(lc->lro_mbuf_count == lc->lro_mbuf_max))
		tcp_lro_flush_all(lc);

	/* create sequence number */
	lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
	    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
	    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
	    ((uint64_t)lc->lro_mbuf_count);

	/* enter mbuf */
	lc->lro_mbuf_data[lc->lro_mbuf_count++].mb = mb;
}
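/*
 * Layout of the 64-bit sort key created above:
 *
 *	bits 56..63: M_HASHTYPE_GET(mb)
 *	bits 24..55: mb->m_pkthdr.flowid
 *	bits  0..23: array index, which keeps the sort stable
 *
 * tcp_lro_flush_all() masks away the low 24 bits (-1ULL << 24), so all
 * packets of one stream compare equal and are replayed in arrival order.
 */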