 * Copyright (c) 2007, Myricom Inc.
 * Copyright (c) 2008, Intel Corporation.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2016 Mellanox Technologies.
 * Portions of this software were developed by Bjoern Zeeb
 * under sponsorship from the FreeBSD Foundation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if_var.h>
#include <net/ethernet.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>

#include <netinet6/ip6_var.h>

#include <machine/in_cksum.h>

static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");
#define TCP_LRO_UPDATE_CSUM 1
#ifndef TCP_LRO_UPDATE_CSUM
#define TCP_LRO_INVALID_CSUM 0x0000
#endif
static void tcp_lro_rx_done(struct lro_ctrl *lc);
static int tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m,
    uint32_t csum, int use_hash);

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,

static unsigned tcp_lro_entries = TCP_LRO_ENTRIES;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
    "default number of LRO entries");
tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,

LIST_INSERT_HEAD(&lc->lro_active, le, next);
LIST_INSERT_HEAD(bucket, le, hash_next);

tcp_lro_active_remove(struct lro_entry *le)

LIST_REMOVE(le, next); /* active list */
LIST_REMOVE(le, hash_next); /* hash bucket */

tcp_lro_init(struct lro_ctrl *lc)

return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));

tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
    unsigned lro_entries, unsigned lro_mbufs)

struct lro_entry *le;
unsigned i, elements;

lc->lro_bad_csum = 0;
lc->lro_mbuf_count = 0;
lc->lro_mbuf_max = lro_mbufs;
lc->lro_cnt = lro_entries;
lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
lc->lro_length_lim = TCP_LRO_LENGTH_MAX;

LIST_INIT(&lc->lro_free);
LIST_INIT(&lc->lro_active);

/* create hash table to accelerate entry lookup */
if (lro_entries > lro_mbufs)
elements = lro_entries;
else
elements = lro_mbufs;
lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
if (lc->lro_hash == NULL) {
memset(lc, 0, sizeof(*lc));

/* compute size to allocate */
size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
    (lro_entries * sizeof(*le));
lc->lro_mbuf_data = (struct lro_mbuf_sort *)
    malloc(size, M_LRO, M_NOWAIT | M_ZERO);

/* check for out of memory */
if (lc->lro_mbuf_data == NULL) {
memset(lc, 0, sizeof(*lc));

/* compute offset for LRO entries */
le = (struct lro_entry *)
    (lc->lro_mbuf_data + lro_mbufs);

/* setup linked list */
for (i = 0; i != lro_entries; i++)
LIST_INSERT_HEAD(&lc->lro_free, le + i, next);
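/*
 * Minimal driver-side usage sketch (illustrative only; "sc", "sc->lro"
 * and "sc->ifp" are hypothetical driver state, not part of this file):
 *
 *     // attach:
 *     if (tcp_lro_init(&sc->lro) != 0)
 *             return (ENOMEM);
 *     sc->lro.ifp = sc->ifp;
 *
 *     // per received packet in the receive interrupt handler:
 *     if (tcp_lro_rx(&sc->lro, m, 0) != 0)
 *             (*sc->ifp->if_input)(sc->ifp, m);
 *
 *     // once per interrupt, after the receive loop:
 *     tcp_lro_flush_all(&sc->lro);
 *
 *     // detach:
 *     tcp_lro_free(&sc->lro);
 */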
tcp_lro_free(struct lro_ctrl *lc)

struct lro_entry *le;

/* reset LRO free list */
LIST_INIT(&lc->lro_free);

/* free active mbufs, if any */
while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
tcp_lro_active_remove(le);

/* free hash table */
if (lc->lro_hash != NULL) {
free(lc->lro_hash, M_LRO);

/* free mbuf array, if any */
for (x = 0; x != lc->lro_mbuf_count; x++)
m_freem(lc->lro_mbuf_data[x].mb);
lc->lro_mbuf_count = 0;

/* free allocated memory, if any */
free(lc->lro_mbuf_data, M_LRO);
lc->lro_mbuf_data = NULL;
#ifdef TCP_LRO_UPDATE_CSUM

tcp_lro_csum_th(struct tcphdr *th)

ch = th->th_sum = 0x0000;
ch = (ch >> 16) + (ch & 0xffff);

return (ch & 0xffff);
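/*
 * Folding example (illustrative): if the 32-bit accumulator ch holds
 * 0x0001fffe, the fold ch = (ch >> 16) + (ch & 0xffff) above gives
 * 0x0001 + 0xfffe = 0xffff, which fits into 16 bits and is what the
 * function returns after masking.
 */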
tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
    uint16_t tcp_data_len, uint16_t csum)

/* Remove length from checksum. */
switch (le->eh_type) {
ip6 = (struct ip6_hdr *)l3hdr;
if (le->append_cnt == 0)
cx = ntohs(ip6->ip6_plen);
cs = in6_cksum_pseudo(ip6, cx, ip6->ip6_nxt, 0);
ip4 = (struct ip *)l3hdr;
if (le->append_cnt == 0)
cs = in_addword(ntohs(ip4->ip_len) - sizeof(*ip4),
cs = in_pseudo(ip4->ip_src.s_addr, ip4->ip_dst.s_addr,
cs = 0; /* Keep compiler happy. */

/* Remove TCP header csum. */
cs = ~tcp_lro_csum_th(th);
c = (c >> 16) + (c & 0xffff);
tcp_lro_rx_done(struct lro_ctrl *lc)

struct lro_entry *le;

while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
tcp_lro_active_remove(le);
tcp_lro_flush(lc, le);

tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)

struct lro_entry *le, *le_tmp;

if (LIST_EMPTY(&lc->lro_active))

timevalsub(&tv, timeout);
LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
if (timevalcmp(&tv, &le->mtime, >=)) {
tcp_lro_active_remove(le);
tcp_lro_flush(lc, le);

tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)

if (le->append_cnt > 0) {
p_len = htons(le->p_len);
switch (le->eh_type) {
ip6->ip6_plen = p_len;
th = (struct tcphdr *)(ip6 + 1);
le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
le->p_len += ETHER_HDR_LEN + sizeof(*ip6);
#ifdef TCP_LRO_UPDATE_CSUM
#ifdef TCP_LRO_UPDATE_CSUM
/* Fix IP header checksum for new length. */
cl = (cl >> 16) + (cl & 0xffff);
ip4->ip_sum = TCP_LRO_INVALID_CSUM;
th = (struct tcphdr *)(ip4 + 1);
le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
le->p_len += ETHER_HDR_LEN;
th = NULL; /* Keep compiler happy. */
le->m_head->m_pkthdr.csum_data = 0xffff;
le->m_head->m_pkthdr.len = le->p_len;

/* Incorporate the latest ACK into the TCP header. */
th->th_ack = le->ack_seq;
th->th_win = le->window;
/* Incorporate latest timestamp into the TCP header. */
if (le->timestamp != 0) {
ts_ptr = (uint32_t *)(th + 1);
ts_ptr[1] = htonl(le->tsval);
ts_ptr[2] = le->tsecr;

#ifdef TCP_LRO_UPDATE_CSUM
/* Update the TCP header checksum. */
le->ulp_csum += p_len;
le->ulp_csum += tcp_lro_csum_th(th);
while (le->ulp_csum > 0xffff)
le->ulp_csum = (le->ulp_csum >> 16) +
    (le->ulp_csum & 0xffff);
th->th_sum = (le->ulp_csum & 0xffff);
th->th_sum = ~th->th_sum;
th->th_sum = TCP_LRO_INVALID_CSUM;

(*lc->ifp->if_input)(lc->ifp, le->m_head);
lc->lro_queued += le->append_cnt + 1;

bzero(le, sizeof(*le));
LIST_INSERT_HEAD(&lc->lro_free, le, next);

#ifdef HAVE_INLINE_FLSLL
#define tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))

static inline uint64_t
tcp_lro_msb_64(uint64_t x)

return (x & ~(x >> 1));
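/*
 * Worked example (illustrative, assuming the usual or-shift cascade that
 * precedes the return above): for x = 0x58 (binary 01011000) the shifts
 * smear the topmost set bit downwards, giving 0x7f; masking with
 * ~(x >> 1) keeps only the highest of those bits, so the function
 * returns 0x40, the most significant set bit of 0x58.
 */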
/*
 * The tcp_lro_sort() routine is comparable to qsort(), except it has
 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
 * number of elements to sort and 64 is the number of sequence bits
 * available. The algorithm is bit-slicing the 64-bit sequence number,
 * sorting one bit at a time from the most significant bit until the
 * least significant one, skipping the constant bits. This is
 * typically called a radix sort.
 */
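/*
 * Illustrative pass (not part of the original comment): for sequence
 * numbers {6, 2, 4} (binary 110, 010, 100) the low bit is constant, the
 * most significant non-constant bit is 100b, the entry with that bit
 * clear (010b) is moved to the front, and the two resulting partitions
 * {2} and {6, 4} are then sorted recursively.
 */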
tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)

struct lro_mbuf_sort temp;

/* for small arrays insertion sort is faster */
for (x = 1; x < size; x++) {
for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
parray[y] = parray[y - 1];

/* compute sequence bits which are constant */
for (x = 0; x != size; x++) {
ones |= parray[x].seq;
zeros |= ~parray[x].seq;

/* compute bits which are not constant into "ones" */

/* pick the most significant bit which is not constant */
ones = tcp_lro_msb_64(ones);

/*
 * Move entries having cleared sequence bits to the beginning
 */
for (x = y = 0; y != size; y++) {
if (parray[y].seq & ones)
parray[x] = parray[y];

KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));

tcp_lro_sort(parray, x);
tcp_lro_flush_all(struct lro_ctrl *lc)

/* check if no mbufs to flush */
if (lc->lro_mbuf_count == 0)

/* sort all mbufs according to stream */
tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);

/* input data into LRO engine, stream by stream */
for (x = 0; x != lc->lro_mbuf_count; x++) {

mb = lc->lro_mbuf_data[x].mb;

/* get sequence number, masking away the packet index */
nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);

/* check for new stream */

/* flush active streams */

/* add packet to LRO engine */
if (tcp_lro_rx2(lc, mb, 0, 0) != 0) {
/* input packet to network layer */
(*lc->ifp->if_input)(lc->ifp, mb);

/* flush active streams */

lc->lro_mbuf_count = 0;
tcp_lro_rx_ipv6(struct lro_ctrl *lc, struct mbuf *m, struct ip6_hdr *ip6,

/* XXX-BZ we should check the flow-label. */

/* XXX-BZ We do not yet support ext. hdrs. */
if (ip6->ip6_nxt != IPPROTO_TCP)
return (TCP_LRO_NOT_SUPPORTED);

/* Find the TCP header. */
*th = (struct tcphdr *)(ip6 + 1);

tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4,

if (ip4->ip_p != IPPROTO_TCP)
return (TCP_LRO_NOT_SUPPORTED);

/* Ensure there are no options. */
if ((ip4->ip_hl << 2) != sizeof(*ip4))
return (TCP_LRO_CANNOT);

/* .. and the packet is not fragmented. */
if (ip4->ip_off & htons(IP_MF|IP_OFFMASK))
return (TCP_LRO_CANNOT);

/* Legacy IP has a header checksum that needs to be correct. */
csum_flags = m->m_pkthdr.csum_flags;
if (csum_flags & CSUM_IP_CHECKED) {
if (__predict_false((csum_flags & CSUM_IP_VALID) == 0)) {
return (TCP_LRO_CANNOT);
csum = in_cksum_hdr(ip4);
if (__predict_false((csum) != 0)) {
return (TCP_LRO_CANNOT);

/* Find the TCP header (we assured there are no IP options). */
*th = (struct tcphdr *)(ip4 + 1);
tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)

struct lro_entry *le;
struct ether_header *eh;
struct ip6_hdr *ip6 = NULL; /* Keep compiler happy. */
struct ip *ip4 = NULL; /* Keep compiler happy. */
void *l3hdr = NULL; /* Keep compiler happy. */
int error, ip_len, l;
uint16_t eh_type, tcp_data_len;
struct lro_head *bucket;

/* We expect a contiguous header [eh, ip, tcp]. */
eh = mtod(m, struct ether_header *);
eh_type = ntohs(eh->ether_type);

CURVNET_SET(lc->ifp->if_vnet);
if (V_ip6_forwarding != 0) {
/* XXX-BZ stats but changing lro_ctrl is a problem. */
return (TCP_LRO_CANNOT);
l3hdr = ip6 = (struct ip6_hdr *)(eh + 1);
error = tcp_lro_rx_ipv6(lc, m, ip6, &th);
tcp_data_len = ntohs(ip6->ip6_plen);
ip_len = sizeof(*ip6) + tcp_data_len;

CURVNET_SET(lc->ifp->if_vnet);
if (V_ipforwarding != 0) {
/* XXX-BZ stats but changing lro_ctrl is a problem. */
return (TCP_LRO_CANNOT);
l3hdr = ip4 = (struct ip *)(eh + 1);
error = tcp_lro_rx_ipv4(lc, m, ip4, &th);
ip_len = ntohs(ip4->ip_len);
tcp_data_len = ip_len - sizeof(*ip4);

/* XXX-BZ what happens in case of VLAN(s)? */
return (TCP_LRO_NOT_SUPPORTED);

/*
 * If the frame is padded beyond the end of the IP packet, then we must
 * trim the extra bytes off.
 */
l = m->m_pkthdr.len - (ETHER_HDR_LEN + ip_len);
/* Truncated packet. */
return (TCP_LRO_CANNOT);

/*
 * Check TCP header constraints.
 */
/* Ensure no bits set besides ACK or PSH. */
if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
if (th->th_flags & TH_SYN)
return (TCP_LRO_CANNOT);
/*
 * Make sure that previously seen segments/ACKs are delivered
 * before this segment, e.g. FIN.
 */

/* XXX-BZ We lose an ACK|PUSH flag concatenating multiple segments. */
/* XXX-BZ Ideally we'd flush on PUSH? */

/*
 * Check for timestamps.
 * Since the only options we handle are timestamps, we only have to
 * handle the simple case of aligned timestamps.
 */
l = (th->th_off << 2);
ts_ptr = (uint32_t *)(th + 1);
if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
    (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
    TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
/*
 * Make sure that previously seen segments/ACKs are delivered
 * before this segment.
 */
/* If the driver did not pass in the checksum, set it now. */

seq = ntohl(th->th_seq);

bucket = &lc->lro_hash[0];
} else if (M_HASHTYPE_ISHASH(m)) {
bucket = &lc->lro_hash[m->m_pkthdr.flowid % lc->lro_hashsz];

hash = ip4->ip_src.s_addr + ip4->ip_dst.s_addr;
hash = ip6->ip6_src.s6_addr32[0] +
    ip6->ip6_dst.s6_addr32[0];
hash += ip6->ip6_src.s6_addr32[1] +
    ip6->ip6_dst.s6_addr32[1];
hash += ip6->ip6_src.s6_addr32[2] +
    ip6->ip6_dst.s6_addr32[2];
hash += ip6->ip6_src.s6_addr32[3] +
    ip6->ip6_dst.s6_addr32[3];
hash += th->th_sport + th->th_dport;
bucket = &lc->lro_hash[hash % lc->lro_hashsz];

/* Try to find a matching previous segment. */
LIST_FOREACH(le, bucket, hash_next) {
if (le->eh_type != eh_type)
if (le->source_port != th->th_sport ||
    le->dest_port != th->th_dport)

if (bcmp(&le->source_ip6, &ip6->ip6_src,
    sizeof(struct in6_addr)) != 0 ||
    bcmp(&le->dest_ip6, &ip6->ip6_dst,
    sizeof(struct in6_addr)) != 0)

if (le->source_ip4 != ip4->ip_src.s_addr ||
    le->dest_ip4 != ip4->ip_dst.s_addr)

/* Timestamps mismatch; this is a FIN, etc */
tcp_lro_active_remove(le);
tcp_lro_flush(lc, le);
return (TCP_LRO_CANNOT);

/* Flush now if appending will result in overflow. */
if (le->p_len > (lc->lro_length_lim - tcp_data_len)) {
tcp_lro_active_remove(le);
tcp_lro_flush(lc, le);

/* Try to append the new segment. */
if (__predict_false(seq != le->next_seq ||
    (tcp_data_len == 0 && le->ack_seq == th->th_ack))) {
/* Out of order packet or duplicate ACK. */
tcp_lro_active_remove(le);
tcp_lro_flush(lc, le);
return (TCP_LRO_CANNOT);

uint32_t tsval = ntohl(*(ts_ptr + 1));
/* Make sure timestamp values are increasing. */
/* XXX-BZ flip and use TSTMP_GEQ macro for this? */
if (__predict_false(le->tsval > tsval ||
return (TCP_LRO_CANNOT);
le->tsecr = *(ts_ptr + 2);

le->next_seq += tcp_data_len;
le->ack_seq = th->th_ack;
le->window = th->th_win;

#ifdef TCP_LRO_UPDATE_CSUM
le->ulp_csum += tcp_lro_rx_csum_fixup(le, l3hdr, th,
    tcp_data_len, ~csum);

if (tcp_data_len == 0) {
/*
 * Flush this LRO entry, if this ACK should not
 * be further delayed.
 */
if (le->append_cnt >= lc->lro_ackcnt_lim) {
tcp_lro_active_remove(le);
tcp_lro_flush(lc, le);

le->p_len += tcp_data_len;

/*
 * Adjust the mbuf so that m_data points to the first byte of
 * the ULP payload. Adjust the mbuf to avoid complications and
 * append new segment to existing mbuf chain.
 */
m_adj(m, m->m_pkthdr.len - tcp_data_len);

le->m_tail->m_next = m;
le->m_tail = m_last(m);

/*
 * If a possible next full length packet would cause an
 * overflow, pro-actively flush now.
 */
if (le->p_len > (lc->lro_length_lim - lc->ifp->if_mtu)) {
tcp_lro_active_remove(le);
tcp_lro_flush(lc, le);
getmicrotime(&le->mtime);
/*
 * Nothing to flush, but this segment can not be further
 * aggregated/delayed.
 */
return (TCP_LRO_CANNOT);

/* Try to find an empty slot. */
if (LIST_EMPTY(&lc->lro_free))
return (TCP_LRO_NO_ENTRIES);

/* Start a new segment chain. */
le = LIST_FIRST(&lc->lro_free);
LIST_REMOVE(le, next);
tcp_lro_active_insert(lc, bucket, le);
getmicrotime(&le->mtime);

/* Start filling in details. */
le->source_ip6 = ip6->ip6_src;
le->dest_ip6 = ip6->ip6_dst;
le->eh_type = eh_type;
le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN - sizeof(*ip6);

le->source_ip4 = ip4->ip_src.s_addr;
le->dest_ip4 = ip4->ip_dst.s_addr;
le->eh_type = eh_type;
le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN;

le->source_port = th->th_sport;
le->dest_port = th->th_dport;
le->next_seq = seq + tcp_data_len;
le->ack_seq = th->th_ack;
le->window = th->th_win;

le->tsval = ntohl(*(ts_ptr + 1));
le->tsecr = *(ts_ptr + 2);

#ifdef TCP_LRO_UPDATE_CSUM
/*
 * Do not touch the csum of the first packet. However save the
 * "adjusted" checksum of just the source and destination addresses,
 * the next header and the TCP payload. The length and TCP header
 * parts may change, so we remove those from the saved checksum and
 * re-add with final values on tcp_lro_flush() if needed.
 */
KASSERT(le->ulp_csum == 0, ("%s: le=%p le->ulp_csum=0x%04x\n",
    __func__, le, le->ulp_csum));

le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len,
th->th_sum = csum; /* Restore checksum on first packet. */

le->m_tail = m_last(m);
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)

return (tcp_lro_rx2(lc, m, csum, 1));
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)

if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
    lc->lro_mbuf_max == 0)) {

/* check if packet is not LRO capable */
if (__predict_false(mb->m_pkthdr.csum_flags == 0 ||
    (lc->ifp->if_capenable & IFCAP_LRO) == 0)) {

/* input packet to network layer */
(*lc->ifp->if_input)(lc->ifp, mb);

/* check if array is full */
if (__predict_false(lc->lro_mbuf_count == lc->lro_mbuf_max))
tcp_lro_flush_all(lc);

/* create sequence number */
lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
    ((uint64_t)lc->lro_mbuf_count);

lc->lro_mbuf_data[lc->lro_mbuf_count++].mb = mb;
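/*
 * Packing example (illustrative): with hash type 2, flowid 0x1234 and
 * this being the fifth queued mbuf (index 4), the key built above is
 * (2ULL << 56) | (0x1234ULL << 24) | 4.  Sorting on these keys groups
 * mbufs by hash type and flow while the low 24 bits preserve arrival
 * order within a flow; tcp_lro_flush_all() masks those 24 bits away
 * again when detecting stream boundaries.
 */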